source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h @ 98193

Last change on this file since 98193 was 98150, checked in by vboxsync, 2 years ago

VMM: Nested VMX: bugref:10318 Fix VMX CR0/CR4 fixed bits masking.

/* $Id: VMXAllTemplate.cpp.h 98150 2023-01-20 06:46:21Z vboxsync $ */
/** @file
 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
 */

/*
 * Copyright (C) 2012-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW macros is missing"
#endif


#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW macros is missing"
#endif

/** Enables condensing of VMREAD instructions, see vmxHCReadToTransient(). */
#define HMVMX_WITH_CONDENSED_VMREADS

/** Use the function table. */
#define HMVMX_USE_FUNCTION_TABLE

/** Determine which tagged-TLB flush handler to use. */
#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID     0
#define HMVMX_FLUSH_TAGGED_TLB_EPT          1
#define HMVMX_FLUSH_TAGGED_TLB_VPID         2
#define HMVMX_FLUSH_TAGGED_TLB_NONE         3

/** Assert that all the given fields have been read from the VMCS. */
#ifdef VBOX_STRICT
# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
    do { \
        uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
        Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
    } while (0)
#else
# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
#endif
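
/* A minimal usage sketch (illustrative only, not a call site from this file):
 * a VM-exit handler typically reads the fields it needs into the transient
 * structure first, then asserts that they are present before consuming them:
 * @code
 *      vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
 *      HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
 *      uint64_t const uExitQual = pVmxTransient->uExitQual;
 * @endcode */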

/**
 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
 * guest using hardware-assisted VMX.
 *
 * This excludes state like GPRs (other than RSP) which are always swapped and
 * restored across the world-switch, and also MSRs like EFER which cannot be
 * modified by the guest without causing a VM-exit.
 */
#define HMVMX_CPUMCTX_EXTRN_ALL      (  CPUMCTX_EXTRN_RIP             \
                                      | CPUMCTX_EXTRN_RFLAGS          \
                                      | CPUMCTX_EXTRN_RSP             \
                                      | CPUMCTX_EXTRN_SREG_MASK       \
                                      | CPUMCTX_EXTRN_TABLE_MASK      \
                                      | CPUMCTX_EXTRN_KERNEL_GS_BASE  \
                                      | CPUMCTX_EXTRN_SYSCALL_MSRS    \
                                      | CPUMCTX_EXTRN_SYSENTER_MSRS   \
                                      | CPUMCTX_EXTRN_TSC_AUX         \
                                      | CPUMCTX_EXTRN_OTHER_MSRS      \
                                      | CPUMCTX_EXTRN_CR0             \
                                      | CPUMCTX_EXTRN_CR3             \
                                      | CPUMCTX_EXTRN_CR4             \
                                      | CPUMCTX_EXTRN_DR7             \
                                      | CPUMCTX_EXTRN_HWVIRT          \
                                      | CPUMCTX_EXTRN_INHIBIT_INT     \
                                      | CPUMCTX_EXTRN_INHIBIT_NMI)

/**
 * Exception bitmap mask for real-mode guests (real-on-v86).
 *
 * We need to intercept all exceptions manually except:
 * - \#AC and \#DB, which are always intercepted anyway (to prevent the CPU
 *   from deadlocking due to bugs in Intel CPUs) and thus are not set here.
 * - \#PF, which need not be intercepted even in real-mode if we have nested
 *   paging support.
 */
#define HMVMX_REAL_MODE_XCPT_MASK    (  RT_BIT(X86_XCPT_DE)  /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI)   \
                                      | RT_BIT(X86_XCPT_BP)             | RT_BIT(X86_XCPT_OF)    | RT_BIT(X86_XCPT_BR)    \
                                      | RT_BIT(X86_XCPT_UD)             | RT_BIT(X86_XCPT_NM)    | RT_BIT(X86_XCPT_DF)    \
                                      | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS)    | RT_BIT(X86_XCPT_NP)    \
                                      | RT_BIT(X86_XCPT_SS)             | RT_BIT(X86_XCPT_GP)  /* RT_BIT(X86_XCPT_PF) */  \
                                      | RT_BIT(X86_XCPT_MF)  /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC)    \
                                      | RT_BIT(X86_XCPT_XF))

/** Maximum VM-instruction error number. */
#define HMVMX_INSTR_ERROR_MAX        28

/** Profiling macro. */
#ifdef HM_PROFILE_EXIT_DISPATCH
# define HMVMX_START_EXIT_DISPATCH_PROF()  STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
# define HMVMX_STOP_EXIT_DISPATCH_PROF()   STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
#else
# define HMVMX_START_EXIT_DISPATCH_PROF()  do { } while (0)
# define HMVMX_STOP_EXIT_DISPATCH_PROF()   do { } while (0)
#endif

#ifndef IN_NEM_DARWIN
/** Assert that preemption is disabled or covered by thread-context hooks. */
# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu)  Assert(   VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
                                                    || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))

/** Assert that we haven't migrated CPUs when thread-context hooks are not
 *  used. */
# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu)      AssertMsg(   VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
                                                       || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
                                                       ("Illegal migration! Entered on CPU %u Current %u\n", \
                                                        (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
#else
# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu)  do { } while (0)
# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu)      do { } while (0)
#endif

/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
 *  context. */
#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz)  AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
                                                              ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
                                                               (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
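
/* A short sketch of the intended use, mirroring the call in
 * vmxHCSwitchToGstOrNstGstVmcs() further below: assert that no part of the
 * guest context is still marked as external (i.e. not yet imported) before
 * acting on it:
 * @code
 *      HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
 * @endcode */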

/** Log the VM-exit reason with an easily visible marker to identify it in a
 *  potential sea of logging data. */
#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
    do { \
        Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
              HMGetVmxExitName(a_uExitReason))); \
    } while (0)


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * Memory operand read or write access.
 */
typedef enum VMXMEMACCESS
{
    VMXMEMACCESS_READ  = 0,
    VMXMEMACCESS_WRITE = 1
} VMXMEMACCESS;


/**
 * VMX VM-exit handler.
 *
 * @returns Strict VBox status code (i.e. informational status codes too).
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
#ifndef HMVMX_USE_FUNCTION_TABLE
typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
#else
typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
/** Pointer to VM-exit handler. */
typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
#endif

/**
 * VMX VM-exit handler, non-strict status code.
 *
 * This is generally the same as FNVMXEXITHANDLER, the NSRC bit is just FYI.
 *
 * @returns VBox status code, no informational status code returned.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 *
 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
 *          use of that status code will be replaced with VINF_EM_SOMETHING
 *          later when switching over to IEM.
 */
#ifndef HMVMX_USE_FUNCTION_TABLE
typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
#else
typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
#endif


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
#ifndef HMVMX_USE_FUNCTION_TABLE
DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
# define HMVMX_EXIT_DECL        DECLINLINE(VBOXSTRICTRC)
# define HMVMX_EXIT_NSRC_DECL   DECLINLINE(int)
#else
# define HMVMX_EXIT_DECL        static DECLCALLBACK(VBOXSTRICTRC)
# define HMVMX_EXIT_NSRC_DECL   HMVMX_EXIT_DECL
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
#endif

static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);

/** @name VM-exit handler prototypes.
 * @{
 */
static FNVMXEXITHANDLER     vmxHCExitXcptOrNmi;
static FNVMXEXITHANDLER     vmxHCExitExtInt;
static FNVMXEXITHANDLER     vmxHCExitTripleFault;
static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
static FNVMXEXITHANDLER     vmxHCExitTaskSwitch;
static FNVMXEXITHANDLER     vmxHCExitCpuid;
static FNVMXEXITHANDLER     vmxHCExitGetsec;
static FNVMXEXITHANDLER     vmxHCExitHlt;
static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
static FNVMXEXITHANDLER     vmxHCExitInvlpg;
static FNVMXEXITHANDLER     vmxHCExitRdpmc;
static FNVMXEXITHANDLER     vmxHCExitVmcall;
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
static FNVMXEXITHANDLER     vmxHCExitVmclear;
static FNVMXEXITHANDLER     vmxHCExitVmlaunch;
static FNVMXEXITHANDLER     vmxHCExitVmptrld;
static FNVMXEXITHANDLER     vmxHCExitVmptrst;
static FNVMXEXITHANDLER     vmxHCExitVmread;
static FNVMXEXITHANDLER     vmxHCExitVmresume;
static FNVMXEXITHANDLER     vmxHCExitVmwrite;
static FNVMXEXITHANDLER     vmxHCExitVmxoff;
static FNVMXEXITHANDLER     vmxHCExitVmxon;
static FNVMXEXITHANDLER     vmxHCExitInvvpid;
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
static FNVMXEXITHANDLER     vmxHCExitInvept;
# endif
#endif
static FNVMXEXITHANDLER     vmxHCExitRdtsc;
static FNVMXEXITHANDLER     vmxHCExitMovCRx;
static FNVMXEXITHANDLER     vmxHCExitMovDRx;
static FNVMXEXITHANDLER     vmxHCExitIoInstr;
static FNVMXEXITHANDLER     vmxHCExitRdmsr;
static FNVMXEXITHANDLER     vmxHCExitWrmsr;
static FNVMXEXITHANDLER     vmxHCExitMwait;
static FNVMXEXITHANDLER     vmxHCExitMtf;
static FNVMXEXITHANDLER     vmxHCExitMonitor;
static FNVMXEXITHANDLER     vmxHCExitPause;
static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
static FNVMXEXITHANDLER     vmxHCExitApicAccess;
static FNVMXEXITHANDLER     vmxHCExitEptViolation;
static FNVMXEXITHANDLER     vmxHCExitEptMisconfig;
static FNVMXEXITHANDLER     vmxHCExitRdtscp;
static FNVMXEXITHANDLER     vmxHCExitPreemptTimer;
static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
static FNVMXEXITHANDLER     vmxHCExitXsetbv;
static FNVMXEXITHANDLER     vmxHCExitInvpcid;
#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
#endif
static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
/** @} */

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
/** @name Nested-guest VM-exit handler prototypes.
 * @{
 */
static FNVMXEXITHANDLER     vmxHCExitXcptOrNmiNested;
static FNVMXEXITHANDLER     vmxHCExitTripleFaultNested;
static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
static FNVMXEXITHANDLER     vmxHCExitTaskSwitchNested;
static FNVMXEXITHANDLER     vmxHCExitHltNested;
static FNVMXEXITHANDLER     vmxHCExitInvlpgNested;
static FNVMXEXITHANDLER     vmxHCExitRdpmcNested;
static FNVMXEXITHANDLER     vmxHCExitVmreadVmwriteNested;
static FNVMXEXITHANDLER     vmxHCExitRdtscNested;
static FNVMXEXITHANDLER     vmxHCExitMovCRxNested;
static FNVMXEXITHANDLER     vmxHCExitMovDRxNested;
static FNVMXEXITHANDLER     vmxHCExitIoInstrNested;
static FNVMXEXITHANDLER     vmxHCExitRdmsrNested;
static FNVMXEXITHANDLER     vmxHCExitWrmsrNested;
static FNVMXEXITHANDLER     vmxHCExitMwaitNested;
static FNVMXEXITHANDLER     vmxHCExitMtfNested;
static FNVMXEXITHANDLER     vmxHCExitMonitorNested;
static FNVMXEXITHANDLER     vmxHCExitPauseNested;
static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
static FNVMXEXITHANDLER     vmxHCExitApicAccessNested;
static FNVMXEXITHANDLER     vmxHCExitApicWriteNested;
static FNVMXEXITHANDLER     vmxHCExitVirtEoiNested;
static FNVMXEXITHANDLER     vmxHCExitRdtscpNested;
static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
static FNVMXEXITHANDLER     vmxHCExitInvpcidNested;
static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
static FNVMXEXITHANDLER     vmxHCExitInstrNested;
static FNVMXEXITHANDLER     vmxHCExitInstrWithInfoNested;
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
static FNVMXEXITHANDLER     vmxHCExitEptViolationNested;
static FNVMXEXITHANDLER     vmxHCExitEptMisconfigNested;
# endif
/** @} */
#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
/**
 * Array of all VMCS fields.
 * Any fields added to the VT-x spec. should be added here.
 *
 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
 * of nested-guests.
 */
static const uint32_t g_aVmcsFields[] =
{
    /* 16-bit control fields. */
    VMX_VMCS16_VPID,
    VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
    VMX_VMCS16_EPTP_INDEX,
    VMX_VMCS16_HLAT_PREFIX_SIZE,

    /* 16-bit guest-state fields. */
    VMX_VMCS16_GUEST_ES_SEL,
    VMX_VMCS16_GUEST_CS_SEL,
    VMX_VMCS16_GUEST_SS_SEL,
    VMX_VMCS16_GUEST_DS_SEL,
    VMX_VMCS16_GUEST_FS_SEL,
    VMX_VMCS16_GUEST_GS_SEL,
    VMX_VMCS16_GUEST_LDTR_SEL,
    VMX_VMCS16_GUEST_TR_SEL,
    VMX_VMCS16_GUEST_INTR_STATUS,
    VMX_VMCS16_GUEST_PML_INDEX,

    /* 16-bit host-state fields. */
    VMX_VMCS16_HOST_ES_SEL,
    VMX_VMCS16_HOST_CS_SEL,
    VMX_VMCS16_HOST_SS_SEL,
    VMX_VMCS16_HOST_DS_SEL,
    VMX_VMCS16_HOST_FS_SEL,
    VMX_VMCS16_HOST_GS_SEL,
    VMX_VMCS16_HOST_TR_SEL,

    /* 64-bit control fields. */
    VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
    VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
    VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
    VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
    VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
    VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
    VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
    VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
    VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
    VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
    VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
    VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
    VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
    VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
    VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
    VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
    VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
    VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
    VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
    VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
    VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
    VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
    VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
    VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
    VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
    VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
    VMX_VMCS64_CTRL_EPTP_FULL,
    VMX_VMCS64_CTRL_EPTP_HIGH,
    VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
    VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
    VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
    VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
    VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
    VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
    VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
    VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
    VMX_VMCS64_CTRL_EPTP_LIST_FULL,
    VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
    VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
    VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
    VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
    VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
    VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
    VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
    VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
    VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
    VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
    VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
    VMX_VMCS64_CTRL_SPPTP_FULL,
    VMX_VMCS64_CTRL_SPPTP_HIGH,
    VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
    VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
    VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
    VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
    VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
    VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
    VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_FULL,
    VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_HIGH,
    VMX_VMCS64_CTRL_HLAT_PTR_FULL,
    VMX_VMCS64_CTRL_HLAT_PTR_HIGH,
    VMX_VMCS64_CTRL_EXIT2_FULL,
    VMX_VMCS64_CTRL_EXIT2_HIGH,

    /* 64-bit read-only data fields. */
    VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
    VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,

    /* 64-bit guest-state fields. */
    VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
    VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
    VMX_VMCS64_GUEST_DEBUGCTL_FULL,
    VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
    VMX_VMCS64_GUEST_PAT_FULL,
    VMX_VMCS64_GUEST_PAT_HIGH,
    VMX_VMCS64_GUEST_EFER_FULL,
    VMX_VMCS64_GUEST_EFER_HIGH,
    VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
    VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
    VMX_VMCS64_GUEST_PDPTE0_FULL,
    VMX_VMCS64_GUEST_PDPTE0_HIGH,
    VMX_VMCS64_GUEST_PDPTE1_FULL,
    VMX_VMCS64_GUEST_PDPTE1_HIGH,
    VMX_VMCS64_GUEST_PDPTE2_FULL,
    VMX_VMCS64_GUEST_PDPTE2_HIGH,
    VMX_VMCS64_GUEST_PDPTE3_FULL,
    VMX_VMCS64_GUEST_PDPTE3_HIGH,
    VMX_VMCS64_GUEST_BNDCFGS_FULL,
    VMX_VMCS64_GUEST_BNDCFGS_HIGH,
    VMX_VMCS64_GUEST_RTIT_CTL_FULL,
    VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
    VMX_VMCS64_GUEST_PKRS_FULL,
    VMX_VMCS64_GUEST_PKRS_HIGH,

    /* 64-bit host-state fields. */
    VMX_VMCS64_HOST_PAT_FULL,
    VMX_VMCS64_HOST_PAT_HIGH,
    VMX_VMCS64_HOST_EFER_FULL,
    VMX_VMCS64_HOST_EFER_HIGH,
    VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
    VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
    VMX_VMCS64_HOST_PKRS_FULL,
    VMX_VMCS64_HOST_PKRS_HIGH,

    /* 32-bit control fields. */
    VMX_VMCS32_CTRL_PIN_EXEC,
    VMX_VMCS32_CTRL_PROC_EXEC,
    VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
    VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
    VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
    VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
    VMX_VMCS32_CTRL_EXIT,
    VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
    VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
    VMX_VMCS32_CTRL_ENTRY,
    VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
    VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
    VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
    VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
    VMX_VMCS32_CTRL_TPR_THRESHOLD,
    VMX_VMCS32_CTRL_PROC_EXEC2,
    VMX_VMCS32_CTRL_PLE_GAP,
    VMX_VMCS32_CTRL_PLE_WINDOW,

    /* 32-bit read-only data fields. */
    VMX_VMCS32_RO_VM_INSTR_ERROR,
    VMX_VMCS32_RO_EXIT_REASON,
    VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
    VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
    VMX_VMCS32_RO_IDT_VECTORING_INFO,
    VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
    VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
    VMX_VMCS32_RO_EXIT_INSTR_INFO,

    /* 32-bit guest-state fields. */
    VMX_VMCS32_GUEST_ES_LIMIT,
    VMX_VMCS32_GUEST_CS_LIMIT,
    VMX_VMCS32_GUEST_SS_LIMIT,
    VMX_VMCS32_GUEST_DS_LIMIT,
    VMX_VMCS32_GUEST_FS_LIMIT,
    VMX_VMCS32_GUEST_GS_LIMIT,
    VMX_VMCS32_GUEST_LDTR_LIMIT,
    VMX_VMCS32_GUEST_TR_LIMIT,
    VMX_VMCS32_GUEST_GDTR_LIMIT,
    VMX_VMCS32_GUEST_IDTR_LIMIT,
    VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
    VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
    VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
    VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
    VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
    VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
    VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
    VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
    VMX_VMCS32_GUEST_INT_STATE,
    VMX_VMCS32_GUEST_ACTIVITY_STATE,
    VMX_VMCS32_GUEST_SMBASE,
    VMX_VMCS32_GUEST_SYSENTER_CS,
    VMX_VMCS32_PREEMPT_TIMER_VALUE,

    /* 32-bit host-state fields. */
    VMX_VMCS32_HOST_SYSENTER_CS,

    /* Natural-width control fields. */
    VMX_VMCS_CTRL_CR0_MASK,
    VMX_VMCS_CTRL_CR4_MASK,
    VMX_VMCS_CTRL_CR0_READ_SHADOW,
    VMX_VMCS_CTRL_CR4_READ_SHADOW,
    VMX_VMCS_CTRL_CR3_TARGET_VAL0,
    VMX_VMCS_CTRL_CR3_TARGET_VAL1,
    VMX_VMCS_CTRL_CR3_TARGET_VAL2,
    VMX_VMCS_CTRL_CR3_TARGET_VAL3,

    /* Natural-width read-only data fields. */
    VMX_VMCS_RO_EXIT_QUALIFICATION,
    VMX_VMCS_RO_IO_RCX,
    VMX_VMCS_RO_IO_RSI,
    VMX_VMCS_RO_IO_RDI,
    VMX_VMCS_RO_IO_RIP,
    VMX_VMCS_RO_GUEST_LINEAR_ADDR,

    /* Natural-width guest-state fields. */
    VMX_VMCS_GUEST_CR0,
    VMX_VMCS_GUEST_CR3,
    VMX_VMCS_GUEST_CR4,
    VMX_VMCS_GUEST_ES_BASE,
    VMX_VMCS_GUEST_CS_BASE,
    VMX_VMCS_GUEST_SS_BASE,
    VMX_VMCS_GUEST_DS_BASE,
    VMX_VMCS_GUEST_FS_BASE,
    VMX_VMCS_GUEST_GS_BASE,
    VMX_VMCS_GUEST_LDTR_BASE,
    VMX_VMCS_GUEST_TR_BASE,
    VMX_VMCS_GUEST_GDTR_BASE,
    VMX_VMCS_GUEST_IDTR_BASE,
    VMX_VMCS_GUEST_DR7,
    VMX_VMCS_GUEST_RSP,
    VMX_VMCS_GUEST_RIP,
    VMX_VMCS_GUEST_RFLAGS,
    VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
    VMX_VMCS_GUEST_SYSENTER_ESP,
    VMX_VMCS_GUEST_SYSENTER_EIP,
    VMX_VMCS_GUEST_S_CET,
    VMX_VMCS_GUEST_SSP,
    VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,

    /* Natural-width host-state fields. */
    VMX_VMCS_HOST_CR0,
    VMX_VMCS_HOST_CR3,
    VMX_VMCS_HOST_CR4,
    VMX_VMCS_HOST_FS_BASE,
    VMX_VMCS_HOST_GS_BASE,
    VMX_VMCS_HOST_TR_BASE,
    VMX_VMCS_HOST_GDTR_BASE,
    VMX_VMCS_HOST_IDTR_BASE,
    VMX_VMCS_HOST_SYSENTER_ESP,
    VMX_VMCS_HOST_SYSENTER_EIP,
    VMX_VMCS_HOST_RSP,
    VMX_VMCS_HOST_RIP,
    VMX_VMCS_HOST_S_CET,
    VMX_VMCS_HOST_SSP,
    VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
};
#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */

#ifdef HMVMX_USE_FUNCTION_TABLE
/**
 * VMX_EXIT dispatch table.
 */
static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
{
    /*  0  VMX_EXIT_XCPT_OR_NMI             */  { vmxHCExitXcptOrNmi },
    /*  1  VMX_EXIT_EXT_INT                 */  { vmxHCExitExtInt },
    /*  2  VMX_EXIT_TRIPLE_FAULT            */  { vmxHCExitTripleFault },
    /*  3  VMX_EXIT_INIT_SIGNAL             */  { vmxHCExitErrUnexpected },
    /*  4  VMX_EXIT_SIPI                    */  { vmxHCExitErrUnexpected },
    /*  5  VMX_EXIT_IO_SMI                  */  { vmxHCExitErrUnexpected },
    /*  6  VMX_EXIT_SMI                     */  { vmxHCExitErrUnexpected },
    /*  7  VMX_EXIT_INT_WINDOW              */  { vmxHCExitIntWindow },
    /*  8  VMX_EXIT_NMI_WINDOW              */  { vmxHCExitNmiWindow },
    /*  9  VMX_EXIT_TASK_SWITCH             */  { vmxHCExitTaskSwitch },
    /* 10  VMX_EXIT_CPUID                   */  { vmxHCExitCpuid },
    /* 11  VMX_EXIT_GETSEC                  */  { vmxHCExitGetsec },
    /* 12  VMX_EXIT_HLT                     */  { vmxHCExitHlt },
    /* 13  VMX_EXIT_INVD                    */  { vmxHCExitInvd },
    /* 14  VMX_EXIT_INVLPG                  */  { vmxHCExitInvlpg },
    /* 15  VMX_EXIT_RDPMC                   */  { vmxHCExitRdpmc },
    /* 16  VMX_EXIT_RDTSC                   */  { vmxHCExitRdtsc },
    /* 17  VMX_EXIT_RSM                     */  { vmxHCExitErrUnexpected },
    /* 18  VMX_EXIT_VMCALL                  */  { vmxHCExitVmcall },
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    /* 19  VMX_EXIT_VMCLEAR                 */  { vmxHCExitVmclear },
    /* 20  VMX_EXIT_VMLAUNCH                */  { vmxHCExitVmlaunch },
    /* 21  VMX_EXIT_VMPTRLD                 */  { vmxHCExitVmptrld },
    /* 22  VMX_EXIT_VMPTRST                 */  { vmxHCExitVmptrst },
    /* 23  VMX_EXIT_VMREAD                  */  { vmxHCExitVmread },
    /* 24  VMX_EXIT_VMRESUME                */  { vmxHCExitVmresume },
    /* 25  VMX_EXIT_VMWRITE                 */  { vmxHCExitVmwrite },
    /* 26  VMX_EXIT_VMXOFF                  */  { vmxHCExitVmxoff },
    /* 27  VMX_EXIT_VMXON                   */  { vmxHCExitVmxon },
#else
    /* 19  VMX_EXIT_VMCLEAR                 */  { vmxHCExitSetPendingXcptUD },
    /* 20  VMX_EXIT_VMLAUNCH                */  { vmxHCExitSetPendingXcptUD },
    /* 21  VMX_EXIT_VMPTRLD                 */  { vmxHCExitSetPendingXcptUD },
    /* 22  VMX_EXIT_VMPTRST                 */  { vmxHCExitSetPendingXcptUD },
    /* 23  VMX_EXIT_VMREAD                  */  { vmxHCExitSetPendingXcptUD },
    /* 24  VMX_EXIT_VMRESUME                */  { vmxHCExitSetPendingXcptUD },
    /* 25  VMX_EXIT_VMWRITE                 */  { vmxHCExitSetPendingXcptUD },
    /* 26  VMX_EXIT_VMXOFF                  */  { vmxHCExitSetPendingXcptUD },
    /* 27  VMX_EXIT_VMXON                   */  { vmxHCExitSetPendingXcptUD },
#endif
    /* 28  VMX_EXIT_MOV_CRX                 */  { vmxHCExitMovCRx },
    /* 29  VMX_EXIT_MOV_DRX                 */  { vmxHCExitMovDRx },
    /* 30  VMX_EXIT_IO_INSTR                */  { vmxHCExitIoInstr },
    /* 31  VMX_EXIT_RDMSR                   */  { vmxHCExitRdmsr },
    /* 32  VMX_EXIT_WRMSR                   */  { vmxHCExitWrmsr },
    /* 33  VMX_EXIT_ERR_INVALID_GUEST_STATE */  { vmxHCExitErrInvalidGuestState },
    /* 34  VMX_EXIT_ERR_MSR_LOAD            */  { vmxHCExitErrUnexpected },
    /* 35  UNDEFINED                        */  { vmxHCExitErrUnexpected },
    /* 36  VMX_EXIT_MWAIT                   */  { vmxHCExitMwait },
    /* 37  VMX_EXIT_MTF                     */  { vmxHCExitMtf },
    /* 38  UNDEFINED                        */  { vmxHCExitErrUnexpected },
    /* 39  VMX_EXIT_MONITOR                 */  { vmxHCExitMonitor },
    /* 40  VMX_EXIT_PAUSE                   */  { vmxHCExitPause },
    /* 41  VMX_EXIT_ERR_MACHINE_CHECK       */  { vmxHCExitErrUnexpected },
    /* 42  UNDEFINED                        */  { vmxHCExitErrUnexpected },
    /* 43  VMX_EXIT_TPR_BELOW_THRESHOLD     */  { vmxHCExitTprBelowThreshold },
    /* 44  VMX_EXIT_APIC_ACCESS             */  { vmxHCExitApicAccess },
    /* 45  VMX_EXIT_VIRTUALIZED_EOI         */  { vmxHCExitErrUnexpected },
    /* 46  VMX_EXIT_GDTR_IDTR_ACCESS        */  { vmxHCExitErrUnexpected },
    /* 47  VMX_EXIT_LDTR_TR_ACCESS          */  { vmxHCExitErrUnexpected },
    /* 48  VMX_EXIT_EPT_VIOLATION           */  { vmxHCExitEptViolation },
    /* 49  VMX_EXIT_EPT_MISCONFIG           */  { vmxHCExitEptMisconfig },
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    /* 50  VMX_EXIT_INVEPT                  */  { vmxHCExitInvept },
#else
    /* 50  VMX_EXIT_INVEPT                  */  { vmxHCExitSetPendingXcptUD },
#endif
    /* 51  VMX_EXIT_RDTSCP                  */  { vmxHCExitRdtscp },
    /* 52  VMX_EXIT_PREEMPT_TIMER           */  { vmxHCExitPreemptTimer },
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    /* 53  VMX_EXIT_INVVPID                 */  { vmxHCExitInvvpid },
#else
    /* 53  VMX_EXIT_INVVPID                 */  { vmxHCExitSetPendingXcptUD },
#endif
    /* 54  VMX_EXIT_WBINVD                  */  { vmxHCExitWbinvd },
    /* 55  VMX_EXIT_XSETBV                  */  { vmxHCExitXsetbv },
    /* 56  VMX_EXIT_APIC_WRITE              */  { vmxHCExitErrUnexpected },
    /* 57  VMX_EXIT_RDRAND                  */  { vmxHCExitErrUnexpected },
    /* 58  VMX_EXIT_INVPCID                 */  { vmxHCExitInvpcid },
    /* 59  VMX_EXIT_VMFUNC                  */  { vmxHCExitErrUnexpected },
    /* 60  VMX_EXIT_ENCLS                   */  { vmxHCExitErrUnexpected },
    /* 61  VMX_EXIT_RDSEED                  */  { vmxHCExitErrUnexpected },
    /* 62  VMX_EXIT_PML_FULL                */  { vmxHCExitErrUnexpected },
    /* 63  VMX_EXIT_XSAVES                  */  { vmxHCExitErrUnexpected },
    /* 64  VMX_EXIT_XRSTORS                 */  { vmxHCExitErrUnexpected },
    /* 65  UNDEFINED                        */  { vmxHCExitErrUnexpected },
    /* 66  VMX_EXIT_SPP_EVENT               */  { vmxHCExitErrUnexpected },
    /* 67  VMX_EXIT_UMWAIT                  */  { vmxHCExitErrUnexpected },
    /* 68  VMX_EXIT_TPAUSE                  */  { vmxHCExitErrUnexpected },
    /* 69  VMX_EXIT_LOADIWKEY               */  { vmxHCExitErrUnexpected },
};
#endif /* HMVMX_USE_FUNCTION_TABLE */
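
/* A sketch of how the table is consumed by the exit dispatcher (assuming
 * uExitReason has already been range-checked against VMX_EXIT_MAX):
 * @code
 *      VBOXSTRICTRC rcStrict = g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
 * @endcode */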

#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
{
    /*  0 */ "(Not Used)",
    /*  1 */ "VMCALL executed in VMX root operation.",
    /*  2 */ "VMCLEAR with invalid physical address.",
    /*  3 */ "VMCLEAR with VMXON pointer.",
    /*  4 */ "VMLAUNCH with non-clear VMCS.",
    /*  5 */ "VMRESUME with non-launched VMCS.",
    /*  6 */ "VMRESUME after VMXOFF.",
    /*  7 */ "VM-entry with invalid control fields.",
    /*  8 */ "VM-entry with invalid host state fields.",
    /*  9 */ "VMPTRLD with invalid physical address.",
    /* 10 */ "VMPTRLD with VMXON pointer.",
    /* 11 */ "VMPTRLD with incorrect revision identifier.",
    /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
    /* 13 */ "VMWRITE to read-only VMCS component.",
    /* 14 */ "(Not Used)",
    /* 15 */ "VMXON executed in VMX root operation.",
    /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
    /* 17 */ "VM-entry with non-launched executing VMCS.",
    /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
    /* 19 */ "VMCALL with non-clear VMCS.",
    /* 20 */ "VMCALL with invalid VM-exit control fields.",
    /* 21 */ "(Not Used)",
    /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
    /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
    /* 24 */ "VMCALL with invalid SMM-monitor features.",
    /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
    /* 26 */ "VM-entry with events blocked by MOV SS.",
    /* 27 */ "(Not Used)",
    /* 28 */ "Invalid operand to INVEPT/INVVPID."
};
#endif /* VBOX_STRICT && LOG_ENABLED */


/**
 * Gets the CR0 guest/host mask.
 *
 * These bits typically do not change through the lifetime of a VM. Any bit set in
 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
 * by the guest.
 *
 * @returns The CR0 guest/host mask.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
{
    /*
     * Guest modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW)
     * and to CR0 bits that we require for shadow paging (PG) must cause VM-exits.
     *
     * Furthermore, modifications to any bits that are currently reserved/unspecified
     * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
     * when future CPUs specify and use currently reserved/unspecified bits.
     */
    /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
     *        enmGuestMode to be in-sync with the current mode. See @bugref{6398}
     *        and @bugref{6944}. */
    PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
    AssertCompile(RT_HI_U32(VMX_EXIT_HOST_CR0_IGNORE_MASK) == UINT32_C(0xffffffff)); /* Paranoia. */
    return (  X86_CR0_PE
            | X86_CR0_NE
            | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
            | X86_CR0_PG
            | VMX_EXIT_HOST_CR0_IGNORE_MASK);
}


/**
 * Gets the CR4 guest/host mask.
 *
 * These bits typically do not change through the lifetime of a VM. Any bit set in
 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
 * by the guest.
 *
 * @returns The CR4 guest/host mask.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
{
    /*
     * We construct a mask of all CR4 bits that the guest can modify without causing
     * a VM-exit. Then we invert this mask to obtain all CR4 bits that should cause
     * a VM-exit when the guest attempts to modify them while executing using
     * hardware-assisted VMX.
     *
     * When a feature is not exposed to the guest (but may be present on the host),
     * we want to intercept guest modifications to the bit so we can emulate proper
     * behavior (e.g., \#GP).
     *
     * Furthermore, only modifications to bits that don't require immediate emulation
     * are allowed. For example, PCIDE is excluded because its behavior depends on CR3,
     * which might not always be the guest value while executing using
     * hardware-assisted VMX.
     */
    PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
    bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
#ifdef IN_NEM_DARWIN
    bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
#endif
    bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;

    /*
     * Paranoia.
     * Ensure features exposed to the guest are present on the host.
     */
    AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
#ifdef IN_NEM_DARWIN
    AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
#endif
    AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);

    uint64_t const fGstMask = X86_CR4_PVI
                            | X86_CR4_TSD
                            | X86_CR4_DE
                            | X86_CR4_MCE
                            | X86_CR4_PCE
                            | X86_CR4_OSXMMEEXCPT
                            | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
                        fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
                            | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
#endif
                            | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
    return ~fGstMask;
}
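
/* A sketch of how these masks would typically be committed to the VMCS,
 * assuming the natural-width write wrapper VMX_VMCS_WRITE_NW follows the same
 * conventions as the VMX_VMCS_WRITE_32/64 wrappers used elsewhere in this file:
 * @code
 *      int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, vmxHCGetFixedCr0Mask(pVCpu));
 *      AssertRC(rc);
 *      rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, vmxHCGetFixedCr4Mask(pVCpu));
 *      AssertRC(rc);
 * @endcode */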


/**
 * Adds one or more exceptions to the exception bitmap and commits it to the current
 * VMCS.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 * @param   uXcptMask       The exception(s) to add.
 */
static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
{
    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
    if ((uXcptBitmap & uXcptMask) != uXcptMask)
    {
        uXcptBitmap |= uXcptMask;
        int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
        AssertRC(rc);
        pVmcsInfo->u32XcptBitmap = uXcptBitmap;
    }
}


/**
 * Adds an exception to the exception bitmap and commits it to the current VMCS.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 * @param   uXcpt           The exception to add.
 */
static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
{
    Assert(uXcpt <= X86_XCPT_LAST);
    vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
}
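
/* Usage sketch (hypothetical call, for illustration only): intercepting \#GP,
 * as a real-on-v86 setup might do via HMVMX_REAL_MODE_XCPT_MASK above:
 * @code
 *      vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 * @endcode */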


/**
 * Removes one or more exceptions from the exception bitmap and commits it to the
 * current VMCS.
 *
 * This takes care of not removing the exception intercept if a nested-guest
 * requires the exception to be intercepted.
 *
 * @returns VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 * @param   uXcptMask       The exception(s) to remove.
 */
static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
{
    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
    if (uXcptBitmap & uXcptMask)
    {
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
        if (!pVmxTransient->fIsNestedGuest)
        { /* likely */ }
        else
            uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
#endif
#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
        uXcptMask &= ~(  RT_BIT(X86_XCPT_BP)
                       | RT_BIT(X86_XCPT_DE)
                       | RT_BIT(X86_XCPT_NM)
                       | RT_BIT(X86_XCPT_TS)
                       | RT_BIT(X86_XCPT_UD)
                       | RT_BIT(X86_XCPT_NP)
                       | RT_BIT(X86_XCPT_SS)
                       | RT_BIT(X86_XCPT_GP)
                       | RT_BIT(X86_XCPT_PF)
                       | RT_BIT(X86_XCPT_MF));
#elif defined(HMVMX_ALWAYS_TRAP_PF)
        uXcptMask &= ~RT_BIT(X86_XCPT_PF);
#endif
        if (uXcptMask)
        {
            /* Validate we are not removing any essential exception intercepts. */
#ifndef IN_NEM_DARWIN
            Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
#else
            Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
#endif
            NOREF(pVCpu);
            Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
            Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));

            /* Remove it from the exception bitmap. */
            uXcptBitmap &= ~uXcptMask;

            /* Commit and update the cache if necessary. */
            if (pVmcsInfo->u32XcptBitmap != uXcptBitmap)
            {
                int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
                AssertRC(rc);
                pVmcsInfo->u32XcptBitmap = uXcptBitmap;
            }
        }
    }
    return VINF_SUCCESS;
}


/**
 * Removes an exception from the exception bitmap and commits it to the current
 * VMCS.
 *
 * @returns VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 * @param   uXcpt           The exception to remove.
 */
static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
{
    return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
}

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX

/**
 * Loads the shadow VMCS specified by the VMCS info. object.
 *
 * @returns VBox status code.
 * @param   pVmcsInfo   The VMCS info. object.
 *
 * @remarks Can be called with interrupts disabled.
 */
static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
{
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);

    int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
    if (RT_SUCCESS(rc))
        pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
    return rc;
}


/**
 * Clears the shadow VMCS specified by the VMCS info. object.
 *
 * @returns VBox status code.
 * @param   pVmcsInfo   The VMCS info. object.
 *
 * @remarks Can be called with interrupts disabled.
 */
static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
{
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);

    int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
    if (RT_SUCCESS(rc))
        pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
    return rc;
}


/**
 * Switches from and to the specified VMCSes.
 *
 * @returns VBox status code.
 * @param   pVmcsInfoFrom   The VMCS info. object we are switching from.
 * @param   pVmcsInfoTo     The VMCS info. object we are switching to.
 *
 * @remarks Called with interrupts disabled.
 */
static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
{
    /*
     * Clear the VMCS we are switching out if it has not already been cleared.
     * This will sync any CPU internal data back to the VMCS.
     */
    if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
    {
        int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
        if (RT_SUCCESS(rc))
        {
            /*
             * The shadow VMCS, if any, would not be active at this point since we
             * would have cleared it while importing the virtual hardware-virtualization
             * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
             * clear the shadow VMCS here, just assert for safety.
             */
            Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
        }
        else
            return rc;
    }

    /*
     * Clear the VMCS we are switching to if it has not already been cleared.
     * This will initialize the VMCS launch state to "clear" required for loading it.
     *
     * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
     */
    if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
    {
        int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
        if (RT_SUCCESS(rc))
        { /* likely */ }
        else
            return rc;
    }

    /*
     * Finally, load the VMCS we are switching to.
     */
    return hmR0VmxLoadVmcs(pVmcsInfoTo);
}


/**
 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
 * caller.
 *
 * @returns VBox status code.
 * @param   pVCpu                   The cross context virtual CPU structure.
 * @param   fSwitchToNstGstVmcs     Whether to switch to the nested-guest VMCS (pass
 *                                  true) or guest VMCS (pass false).
 */
static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
{
    /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
    HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);

    PVMXVMCSINFO pVmcsInfoFrom;
    PVMXVMCSINFO pVmcsInfoTo;
    if (fSwitchToNstGstVmcs)
    {
        pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
        pVmcsInfoTo   = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
    }
    else
    {
        pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
        pVmcsInfoTo   = &pVCpu->hmr0.s.vmx.VmcsInfo;
    }

    /*
     * Disable interrupts to prevent being preempted while we switch the current VMCS as the
     * preemption hook code path acquires the current VMCS.
     */
    RTCCUINTREG const fEFlags = ASMIntDisableFlags();

    int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
    if (RT_SUCCESS(rc))
    {
        pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs           = fSwitchToNstGstVmcs;
        pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;

        /*
         * If we are switching to a VMCS that was executed on a different host CPU or was
         * never executed before, flag that we need to export the host state before executing
         * guest/nested-guest code using hardware-assisted VMX.
         *
         * This could probably be done in a preemptible context since the preemption hook
         * will flag the necessary change in host context. However, since preemption is
         * already disabled and to avoid making assumptions about host specific code in
         * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
         * disabled.
         */
        if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
        { /* likely */ }
        else
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);

        ASMSetFlags(fEFlags);

        /*
         * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
         * flag that we need to update the host MSR values there. Even if we decide in the
         * future to share the VM-exit MSR-store area page between the guest and nested-guest,
         * if its content differs, we would have to update the host MSRs anyway.
         */
        pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
    }
    else
        ASMSetFlags(fEFlags);
    return rc;
}

#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
#ifdef VBOX_STRICT

/**
 * Reads the VM-entry interruption-information field from the VMCS into the VMX
 * transient structure.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
    AssertRC(rc);
}


/**
 * Reads the VM-entry exception error code field from the VMCS into
 * the VMX transient structure.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
    AssertRC(rc);
}


/**
 * Reads the VM-entry instruction-length field from the VMCS into
 * the VMX transient structure.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
    AssertRC(rc);
}

#endif /* VBOX_STRICT */


/**
 * Reads VMCS fields into the VMXTRANSIENT structure, slow path version.
 *
 * Don't call directly unless it's likely that some or all of the fields given
 * in @a a_fReadMask have already been read.
 *
 * @tparam  a_fReadMask     The fields to read.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
template<uint32_t const a_fReadMask>
static void vmxHCReadToTransientSlow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    AssertCompile((a_fReadMask & ~(  HMVMX_READ_EXIT_QUALIFICATION
                                   | HMVMX_READ_EXIT_INSTR_LEN
                                   | HMVMX_READ_EXIT_INSTR_INFO
                                   | HMVMX_READ_IDT_VECTORING_INFO
                                   | HMVMX_READ_IDT_VECTORING_ERROR_CODE
                                   | HMVMX_READ_EXIT_INTERRUPTION_INFO
                                   | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
                                   | HMVMX_READ_GUEST_LINEAR_ADDR
                                   | HMVMX_READ_GUEST_PHYSICAL_ADDR
                                   | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
                                  )) == 0);

    if ((pVmxTransient->fVmcsFieldsRead & a_fReadMask) != a_fReadMask)
    {
        uint32_t const fVmcsFieldsRead = pVmxTransient->fVmcsFieldsRead;

        if (   (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
            && !(fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
        {
            int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
            AssertRC(rc);
        }
        if (   (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
            && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
        {
            int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
            AssertRC(rc);
        }
        if (   (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
            && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
        {
            int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
            AssertRC(rc);
        }
        if (   (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
            && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
        {
            int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
            AssertRC(rc);
        }
        if (   (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
            && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
        {
            int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
            AssertRC(rc);
        }
        if (   (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
            && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
        {
            int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
            AssertRC(rc);
        }
        if (   (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
            && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
        {
            int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
            AssertRC(rc);
        }
        if (   (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
            && !(fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
        {
            int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
            AssertRC(rc);
        }
        if (   (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
            && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
        {
            int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
            AssertRC(rc);
        }
        if (   (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
            && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
        {
            int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
            AssertRC(rc);
        }

        pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
    }
}


/**
 * Reads VMCS fields into the VMXTRANSIENT structure.
 *
 * This optimizes for the case where none of the fields in @a a_fReadMask has
 * been read yet, generating an optimized read sequence without any
 * conditionals in between in non-strict builds.
 *
 * @tparam  a_fReadMask     The fields to read. One or more of the
 *                          HMVMX_READ_XXX fields ORed together.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
template<uint32_t const a_fReadMask>
DECLINLINE(void) vmxHCReadToTransient(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    AssertCompile((a_fReadMask & ~(  HMVMX_READ_EXIT_QUALIFICATION
                                   | HMVMX_READ_EXIT_INSTR_LEN
                                   | HMVMX_READ_EXIT_INSTR_INFO
                                   | HMVMX_READ_IDT_VECTORING_INFO
                                   | HMVMX_READ_IDT_VECTORING_ERROR_CODE
                                   | HMVMX_READ_EXIT_INTERRUPTION_INFO
                                   | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
                                   | HMVMX_READ_GUEST_LINEAR_ADDR
                                   | HMVMX_READ_GUEST_PHYSICAL_ADDR
                                   | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
                                  )) == 0);

    if (RT_LIKELY(!(pVmxTransient->fVmcsFieldsRead & a_fReadMask)))
    {
        if (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
        {
            int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
            AssertRC(rc);
        }
        if (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
        {
            int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
            AssertRC(rc);
        }
        if (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
        {
            int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
            AssertRC(rc);
        }
        if (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
        {
            int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
            AssertRC(rc);
        }
        if (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
        {
            int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
            AssertRC(rc);
        }
        if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
        {
            int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
            AssertRC(rc);
        }
        if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
        {
            int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
            AssertRC(rc);
        }
        if (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
        {
            int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
            AssertRC(rc);
        }
        if (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
        {
            int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
            AssertRC(rc);
        }
        if (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
        {
            int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
            AssertRC(rc);
        }

        pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
    }
    else
    {
        STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatReadToTransientFallback);
        Log11Func(("a_fReadMask=%#x fVmcsFieldsRead=%#x => %#x - Taking inefficient code path!\n",
                   a_fReadMask, pVmxTransient->fVmcsFieldsRead, a_fReadMask & pVmxTransient->fVmcsFieldsRead));
        vmxHCReadToTransientSlow<a_fReadMask>(pVCpu, pVmxTransient);
    }
}
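
/* A representative instantiation (mirroring how the exit handlers invoke this
 * template, with the field mask supplied as a compile-time template argument):
 * @code
 *      vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
 *                           | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
 * @endcode */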


#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
/**
 * Reads all relevant read-only VMCS fields into the VMX transient structure.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION,             &pVmxTransient->uExitQual);
    rc    |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH,            &pVmxTransient->cbExitInstr);
    rc    |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO,              &pVmxTransient->ExitInstrInfo.u);
    rc    |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO,           &pVmxTransient->uIdtVectoringInfo);
    rc    |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,     &pVmxTransient->uIdtVectoringErrorCode);
    rc    |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,       &pVmxTransient->uExitIntInfo);
    rc    |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
    rc    |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR,              &pVmxTransient->uGuestLinearAddr);
    rc    |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,         &pVmxTransient->uGuestPhysicalAddr);
    AssertRC(rc);
    pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
                                   |  HMVMX_READ_EXIT_INSTR_LEN
                                   |  HMVMX_READ_EXIT_INSTR_INFO
                                   |  HMVMX_READ_IDT_VECTORING_INFO
                                   |  HMVMX_READ_IDT_VECTORING_ERROR_CODE
                                   |  HMVMX_READ_EXIT_INTERRUPTION_INFO
                                   |  HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
                                   |  HMVMX_READ_GUEST_LINEAR_ADDR
                                   |  HMVMX_READ_GUEST_PHYSICAL_ADDR;
}
#endif

/**
 * Verifies that our cached values of the VMCS fields are all consistent with
 * what's actually present in the VMCS.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS if all our caches match their respective VMCS fields.
 * @retval  VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
 *          VMCS content. The HMCPU error-field is updated, see VMX_VCI_XXX.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmcsInfo       The VMCS info. object.
 * @param   fIsNstGstVmcs   Whether this is a nested-guest VMCS.
 */
static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
{
    const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";

    uint32_t u32Val;
    int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
    AssertRC(rc);
    AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
                        ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
                        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);

    rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
    AssertRC(rc);
    AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
                        ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
                        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);

    rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
    AssertRC(rc);
    AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
                        ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
                        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);

    rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
    AssertRC(rc);
    AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
                        ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
                        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);

    if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
    {
        rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
        AssertRC(rc);
        AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
                            ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
                            VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
                            VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    }

    uint64_t u64Val;
    if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
    {
        rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
        AssertRC(rc);
        AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
                            ("%s proc3 controls mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
                            VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
                            VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    }

    rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
    AssertRC(rc);
    AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
                        ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
                        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);

    rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
    AssertRC(rc);
    AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
                        ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
                        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);

    NOREF(pcszVmcs);
    return VINF_SUCCESS;
}
1436
1437
1438/**
1439 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1440 * VMCS.
1441 *
1442 * This is typically required when the guest changes paging mode.
1443 *
1444 * @returns VBox status code.
1445 * @param pVCpu The cross context virtual CPU structure.
1446 * @param pVmxTransient The VMX-transient structure.
1447 *
1448 * @remarks Requires EFER.
1449 * @remarks No-long-jump zone!!!
1450 */
1451static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1452{
1453 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1454 {
1455 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1456 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1457
1458 /*
1459 * VM-entry controls.
1460 */
1461 {
1462 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1463 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
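 /* A minimal sketch of how the allowed0/allowed1 pair constrains a control field
    (purely illustrative values, not read from any real MSR): with allowed0=0x000011ff
    and allowed1=0x0007ffff, every allowed0 bit must stay set and every bit clear in
    allowed1 must stay clear, which is exactly what the "(fVal & fZap) == fVal" check
    further below verifies:

        uint32_t fVal = 0x000011ff;          // start from the must-be-one bits
        fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;   // add the optional features we want
        Assert((fVal & 0x0007ffff) == fVal); // no must-be-zero bit was set
 */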
1464
1465 /*
1466 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1467 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1468 *
1469 * For nested-guests, this is a mandatory VM-entry control. It's also
1470 * required because we do not want to leak host bits to the nested-guest.
1471 */
1472 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1473
1474 /*
1475 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1476 *
1477 * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1478 * required to get the nested-guest working with hardware-assisted VMX execution.
1479 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1480 * can skip intercepting changes to the EFER MSR, which is why this needs to be done
1481 * here rather than while merging the guest VMCS controls.
1482 */
1483 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1484 {
1485 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1486 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1487 }
1488 else
1489 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1490
1491 /*
1492 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1493 *
1494 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1495 * regardless of whether the nested-guest VMCS specifies it because we are free to
1496 * load whatever MSRs we require and we do not need to modify the guest visible copy
1497 * of the VM-entry MSR load area.
1498 */
1499 if ( g_fHmVmxSupportsVmcsEfer
1500#ifndef IN_NEM_DARWIN
1501 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1502#endif
1503 )
1504 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1505 else
1506 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1507
1508 /*
1509 * The following should -not- be set (since we're not in SMM mode):
1510 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1511 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1512 */
1513
1514 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1515 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1516
1517 if ((fVal & fZap) == fVal)
1518 { /* likely */ }
1519 else
1520 {
1521 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1522 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1523 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1524 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1525 }
1526
1527 /* Commit it to the VMCS. */
1528 if (pVmcsInfo->u32EntryCtls != fVal)
1529 {
1530 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1531 AssertRC(rc);
1532 pVmcsInfo->u32EntryCtls = fVal;
1533 }
1534 }
1535
1536 /*
1537 * VM-exit controls.
1538 */
1539 {
1540 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1541 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1542
1543 /*
1544 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1545 * supported the 1-setting of this bit.
1546 *
1547 * For nested-guests, we set "save debug controls" since the converse
1548 * "load debug controls" is mandatory for nested-guests anyway.
1549 */
1550 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1551
1552 /*
1553 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1554 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1555 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1556 * vmxHCExportHostMsrs().
1557 *
1558 * For nested-guests, we always set this bit as we do not support 32-bit
1559 * hosts.
1560 */
1561 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1562
1563#ifndef IN_NEM_DARWIN
1564 /*
1565 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1566 *
1567 * For nested-guests, we should use the "save IA32_EFER" control if we also
1568 * used the "load IA32_EFER" control while exporting VM-entry controls.
1569 */
1570 if ( g_fHmVmxSupportsVmcsEfer
1571 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1572 {
1573 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1574 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1575 }
1576#endif
1577
1578 /*
1579 * Enable saving of the VMX-preemption timer value on VM-exit.
1580 * For nested-guests, currently not exposed/used.
1581 */
1582 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1583 * the timer value. */
1584 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1585 {
1586 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1587 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1588 }
1589
1590 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1591 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1592
1593 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1594 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1595 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1596
1597 if ((fVal & fZap) == fVal)
1598 { /* likely */ }
1599 else
1600 {
1601 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1602 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1603 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1604 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1605 }
1606
1607 /* Commit it to the VMCS. */
1608 if (pVmcsInfo->u32ExitCtls != fVal)
1609 {
1610 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1611 AssertRC(rc);
1612 pVmcsInfo->u32ExitCtls = fVal;
1613 }
1614 }
1615
1616 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1617 }
1618 return VINF_SUCCESS;
1619}
1620
1621
1622/**
1623 * Sets the TPR threshold in the VMCS.
1624 *
1625 * @param pVCpu The cross context virtual CPU structure.
1626 * @param pVmcsInfo The VMCS info. object.
1627 * @param u32TprThreshold The TPR threshold (task-priority class only).
1628 */
1629DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1630{
1631 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1632 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1633 RT_NOREF(pVmcsInfo);
1634 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1635 AssertRC(rc);
1636}
1637
1638
1639/**
1640 * Exports the guest APIC TPR state into the VMCS.
1641 *
1642 * @param pVCpu The cross context virtual CPU structure.
1643 * @param pVmxTransient The VMX-transient structure.
1644 *
1645 * @remarks No-long-jump zone!!!
1646 */
1647static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1648{
1649 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1650 {
1651 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1652
1653 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1654 if (!pVmxTransient->fIsNestedGuest)
1655 {
1656 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1657 && APICIsEnabled(pVCpu))
1658 {
1659 /*
1660 * Setup TPR shadowing.
1661 */
1662 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1663 {
1664 bool fPendingIntr = false;
1665 uint8_t u8Tpr = 0;
1666 uint8_t u8PendingIntr = 0;
1667 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1668 AssertRC(rc);
1669
1670 /*
1671 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1672 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1673 * priority of the pending interrupt so we can deliver the interrupt. If there
1674 * are no interrupts pending, set threshold to 0 to not cause any
1675 * TPR-below-threshold VM-exits.
1676 */
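 /* Worked example with hypothetical values: a pending interrupt vector of 0x51 has
    priority 0x51 >> 4 = 5 and a guest TPR of 0x60 has priority 6. Since 5 <= 6 the
    interrupt is currently masked, so the threshold is set to 5 and the guest lowering
    its TPR below 0x50 triggers a TPR-below-threshold VM-exit, letting us inject it. */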
1677 uint32_t u32TprThreshold = 0;
1678 if (fPendingIntr)
1679 {
1680 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1681 (which is the Task-Priority Class). */
1682 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1683 const uint8_t u8TprPriority = u8Tpr >> 4;
1684 if (u8PendingPriority <= u8TprPriority)
1685 u32TprThreshold = u8PendingPriority;
1686 }
1687
1688 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1689 }
1690 }
1691 }
1692 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1693 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1694 }
1695}
1696
1697
1698/**
1699 * Gets the guest interruptibility-state and updates related force-flags.
1700 *
1701 * @returns Guest's interruptibility-state.
1702 * @param pVCpu The cross context virtual CPU structure.
1703 *
1704 * @remarks No-long-jump zone!!!
1705 */
1706static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1707{
1708 uint32_t fIntrState;
1709
1710 /*
1711 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1712 */
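 /* For instance (illustrative guest code): after "sti" interrupts remain inhibited for
    one more instruction and we report block-by-STI; after "mov ss, ax" or "pop ss" we
    report block-by-MOVSS so that a following "mov esp, ..." completes before any
    interrupt or #DB is delivered. */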
1713 if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
1714 fIntrState = 0;
1715 else
1716 {
1717 /* If inhibition is active, RIP should've been imported from the VMCS already. */
1718 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1719
1720 if (CPUMIsInInterruptShadowAfterSs(&pVCpu->cpum.GstCtx))
1721 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1722 else
1723 {
1724 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1725
1726 /* Block-by-STI must not be set when interrupts are disabled. */
1727 AssertStmt(pVCpu->cpum.GstCtx.eflags.Bits.u1IF, fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
1728 }
1729 }
1730
1731 /*
1732 * Check if we should inhibit NMI delivery.
1733 */
1734 if (!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1735 { /* likely */ }
1736 else
1737 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1738
1739 /*
1740 * Validate.
1741 */
1742 /* We don't support block-by-SMI yet. */
1743 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1744
1745 return fIntrState;
1746}
1747
1748
1749/**
1750 * Exports the exception intercepts required for guest execution in the VMCS.
1751 *
1752 * @param pVCpu The cross context virtual CPU structure.
1753 * @param pVmxTransient The VMX-transient structure.
1754 *
1755 * @remarks No-long-jump zone!!!
1756 */
1757static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1758{
1759 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1760 {
1761 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1762 if ( !pVmxTransient->fIsNestedGuest
1763 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1764 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1765 else
1766 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1767
1768 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1769 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1770 }
1771}
1772
1773
1774/**
1775 * Exports the guest's RIP into the guest-state area in the VMCS.
1776 *
1777 * @param pVCpu The cross context virtual CPU structure.
1778 *
1779 * @remarks No-long-jump zone!!!
1780 */
1781static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1782{
1783 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1784 {
1785 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1786
1787 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1788 AssertRC(rc);
1789
1790 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1791 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1792 }
1793}
1794
1795
1796/**
1797 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1798 *
1799 * @param pVCpu The cross context virtual CPU structure.
1800 * @param pVmxTransient The VMX-transient structure.
1801 *
1802 * @remarks No-long-jump zone!!!
1803 */
1804static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1805{
1806 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1807 {
1808 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1809
1810 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits
1811 of RFLAGS are reserved (MBZ). We use bits 63:24 for internal purposes, so there is
1812 no need to assert this; the CPUMX86EFLAGS/CPUMX86RFLAGS union masks these off for us.
1813 Use 32-bit VMWRITE. */
1814 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
1815 Assert((fEFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
1816 AssertMsg(!(fEFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK)), ("%#x\n", fEFlags));
1817
1818#ifndef IN_NEM_DARWIN
1819 /*
1820 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1821 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1822 * can run the real-mode guest code under Virtual 8086 mode.
1823 */
1824 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1825 if (pVmcsInfo->RealMode.fRealOnV86Active)
1826 {
1827 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1828 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1829 Assert(!pVmxTransient->fIsNestedGuest);
1830 pVmcsInfo->RealMode.Eflags.u32 = fEFlags; /* Save the original eflags of the real-mode guest. */
1831 fEFlags |= X86_EFL_VM; /* Set the Virtual 8086 mode bit. */
1832 fEFlags &= ~X86_EFL_IOPL; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1833 }
1834#else
1835 RT_NOREF(pVmxTransient);
1836#endif
1837
1838 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags);
1839 AssertRC(rc);
1840
1841 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1842 Log4Func(("eflags=%#RX32\n", fEFlags));
1843 }
1844}
1845
1846
1847#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1848/**
1849 * Copies the nested-guest VMCS to the shadow VMCS.
1850 *
1851 * @returns VBox status code.
1852 * @param pVCpu The cross context virtual CPU structure.
1853 * @param pVmcsInfo The VMCS info. object.
1854 *
1855 * @remarks No-long-jump zone!!!
1856 */
1857static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1858{
1859 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1860 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1861
1862 /*
1863 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1864 * current VMCS, as we may try saving guest lazy MSRs.
1865 *
1866 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1867 * triggering the VMCS import code -- which currently performs the guest MSR reads
1868 * (on 64-bit hosts) and accesses the auto-load/store MSR area on 32-bit hosts --
1869 * or the rest of the VMX leave-session machinery.
1870 */
1871 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1872
1873 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1874 if (RT_SUCCESS(rc))
1875 {
1876 /*
1877 * Copy all guest read/write VMCS fields.
1878 *
1879 * We don't check for VMWRITE failures here for performance reasons and
1880 * because they are not expected to fail, barring irrecoverable conditions
1881 * like hardware errors.
1882 */
1883 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1884 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1885 {
1886 uint64_t u64Val;
1887 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1888 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1889 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1890 }
1891
1892 /*
1893 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1894 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1895 */
1896 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1897 {
1898 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1899 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1900 {
1901 uint64_t u64Val;
1902 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1903 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1904 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1905 }
1906 }
1907
1908 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1909 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1910 }
1911
1912 ASMSetFlags(fEFlags);
1913 return rc;
1914}
1915
1916
1917/**
1918 * Copies the shadow VMCS to the nested-guest VMCS.
1919 *
1920 * @returns VBox status code.
1921 * @param pVCpu The cross context virtual CPU structure.
1922 * @param pVmcsInfo The VMCS info. object.
1923 *
1924 * @remarks Called with interrupts disabled.
1925 */
1926static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1927{
1928 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1929 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1930 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1931
1932 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1933 if (RT_SUCCESS(rc))
1934 {
1935 /*
1936 * Copy guest read/write fields from the shadow VMCS.
1937 * Guest read-only fields cannot be modified, so no need to copy them.
1938 *
1939 * We don't check for VMREAD failures here for performance reasons and
1940 * because they are not expected to fail, barring irrecoverable conditions
1941 * like hardware errors.
1942 */
1943 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1944 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1945 {
1946 uint64_t u64Val;
1947 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1948 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1949 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1950 }
1951
1952 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1953 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1954 }
1955 return rc;
1956}
1957
1958
1959/**
1960 * Enables VMCS shadowing for the given VMCS info. object.
1961 *
1962 * @param pVCpu The cross context virtual CPU structure.
1963 * @param pVmcsInfo The VMCS info. object.
1964 *
1965 * @remarks No-long-jump zone!!!
1966 */
1967static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1968{
1969 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1970 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1971 {
1972 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1973 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1974 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1975 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1976 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1977 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
1978 Log4Func(("Enabled\n"));
1979 }
1980}
1981
1982
1983/**
1984 * Disables VMCS shadowing for the given VMCS info. object.
1985 *
1986 * @param pVCpu The cross context virtual CPU structure.
1987 * @param pVmcsInfo The VMCS info. object.
1988 *
1989 * @remarks No-long-jump zone!!!
1990 */
1991static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1992{
1993 /*
1994 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
1995 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
1996 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
1997 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
1998 *
1999 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2000 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2001 */
2002 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2003 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2004 {
2005 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2006 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2007 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2008 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2009 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2010 Log4Func(("Disabled\n"));
2011 }
2012}
2013#endif
2014
2015
2016/**
2017 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2018 *
2019 * The guest FPU state is always pre-loaded, hence we don't need to bother with
2020 * sharing FPU-related CR0 bits between the guest and host.
2021 *
2022 * @returns VBox status code.
2023 * @param pVCpu The cross context virtual CPU structure.
2024 * @param pVmxTransient The VMX-transient structure.
2025 *
2026 * @remarks No-long-jump zone!!!
2027 */
2028static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2029{
2030 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2031 {
2032 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2033 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2034
2035 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2036 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2037 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2038 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2039 else
2040 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
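 /* Illustrative sketch with typical (but not universal) values: many CPUs report CR0
    fixed0=0x80000021 (PG, NE, PE) and fixed1=0xffffffff, in which case the masking
    done further below boils down to:

        uint64_t u64Cr0 = pVCpu->cpum.GstCtx.cr0;
        u64Cr0 |= UINT64_C(0x80000021);   // force the must-be-one bits
        u64Cr0 &= UINT64_C(0xffffffff);   // clear the must-be-zero bits (none here)

    With unrestricted guest execution, PE and PG are dropped from the must-be-one set,
    which is what the VM_IS_VMX_UNRESTRICTED_GUEST adjustment above does. */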
2041
2042 if (!pVmxTransient->fIsNestedGuest)
2043 {
2044 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2045 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2046 uint64_t const u64ShadowCr0 = u64GuestCr0;
2047 Assert(!RT_HI_U32(u64GuestCr0));
2048
2049 /*
2050 * Setup VT-x's view of the guest CR0.
2051 */
2052 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2053 if (VM_IS_VMX_NESTED_PAGING(pVM))
2054 {
2055#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2056 if (CPUMIsGuestPagingEnabled(pVCpu))
2057 {
2058 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2059 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2060 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2061 }
2062 else
2063 {
2064 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2065 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2066 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2067 }
2068
2069 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2070 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2071 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2072#endif
2073 }
2074 else
2075 {
2076 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2077 u64GuestCr0 |= X86_CR0_WP;
2078 }
2079
2080 /*
2081 * Guest FPU bits.
2082 *
2083 * Since we always pre-load the guest FPU before VM-entry, there is no need to track lazy
2084 * state using CR0.TS.
2085 *
2086 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2087 * set on the first CPUs to support VT-x, with no exception made for unrestricted guests in the VM-entry checks.
2088 */
2089 u64GuestCr0 |= X86_CR0_NE;
2090
2091 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2092 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2093
2094 /*
2095 * Update exception intercepts.
2096 */
2097 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2098#ifndef IN_NEM_DARWIN
2099 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2100 {
2101 Assert(PDMVmmDevHeapIsEnabled(pVM));
2102 Assert(pVM->hm.s.vmx.pRealModeTSS);
2103 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2104 }
2105 else
2106#endif
2107 {
2108 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2109 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2110 if (fInterceptMF)
2111 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2112 }
2113
2114 /* Additional intercepts for debugging, define these yourself explicitly. */
2115#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2116 uXcptBitmap |= 0
2117 | RT_BIT(X86_XCPT_BP)
2118 | RT_BIT(X86_XCPT_DE)
2119 | RT_BIT(X86_XCPT_NM)
2120 | RT_BIT(X86_XCPT_TS)
2121 | RT_BIT(X86_XCPT_UD)
2122 | RT_BIT(X86_XCPT_NP)
2123 | RT_BIT(X86_XCPT_SS)
2124 | RT_BIT(X86_XCPT_GP)
2125 | RT_BIT(X86_XCPT_PF)
2126 | RT_BIT(X86_XCPT_MF)
2127 ;
2128#elif defined(HMVMX_ALWAYS_TRAP_PF)
2129 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2130#endif
2131 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2132 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2133 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2134 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2135 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2136
2137 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2138 u64GuestCr0 |= fSetCr0;
2139 u64GuestCr0 &= fZapCr0;
2140 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2141
2142 Assert(!RT_HI_U32(u64GuestCr0));
2143 Assert(u64GuestCr0 & X86_CR0_NE);
2144
2145 /* Commit the CR0 and related fields to the guest VMCS. */
2146 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2147 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2148 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2149 {
2150 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2151 AssertRC(rc);
2152 }
2153 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2154 {
2155 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2156 AssertRC(rc);
2157 }
2158
2159 /* Update our caches. */
2160 pVmcsInfo->u32ProcCtls = uProcCtls;
2161 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2162
2163 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2164 }
2165 else
2166 {
2167 /*
2168 * With nested-guests, we may have extended the guest/host mask here since we
2169 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2170 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2171 * originally supplied. We must copy those bits from the nested-guest CR0 into
2172 * the nested-guest CR0 read-shadow.
2173 */
2174 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2175 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2176 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2177
2178 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2179 u64GuestCr0 |= fSetCr0;
2180 u64GuestCr0 &= fZapCr0;
2181 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2182
2183 Assert(!RT_HI_U32(u64GuestCr0));
2184 Assert(u64GuestCr0 & X86_CR0_NE);
2185
2186 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2187 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2188 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2189
2190 Log4Func(("cr0=%#RX64 shadow=%#RX64 vmcs_read_shw=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0,
2191 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u, fSetCr0, fZapCr0));
2192 }
2193
2194 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2195 }
2196
2197 return VINF_SUCCESS;
2198}
2199
2200
2201/**
2202 * Exports the guest control registers (CR3, CR4) into the guest-state area
2203 * in the VMCS.
2204 *
2205 * @returns VBox strict status code.
2206 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2207 * without unrestricted guest access and the VMMDev is not presently
2208 * mapped (e.g. EFI32).
2209 *
2210 * @param pVCpu The cross context virtual CPU structure.
2211 * @param pVmxTransient The VMX-transient structure.
2212 *
2213 * @remarks No-long-jump zone!!!
2214 */
2215static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2216{
2217 int rc = VINF_SUCCESS;
2218 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2219
2220 /*
2221 * Guest CR2.
2222 * It's always loaded in the assembler code. Nothing to do here.
2223 */
2224
2225 /*
2226 * Guest CR3.
2227 */
2228 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2229 {
2230 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2231
2232 if (VM_IS_VMX_NESTED_PAGING(pVM))
2233 {
2234#ifndef IN_NEM_DARWIN
2235 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2236 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2237
2238 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2239 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2240 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2241 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2242
2243 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2244 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2245 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
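 /* For reference, the low EPTP bits under these settings: memory type WB=6 in bits 2:0
    and page-walk length minus one (3) in bits 5:3, so a hypothetical EPT PML4 table at
    host-physical 0x12345000 yields an EPTP of 0x1234501e. */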
2246
2247 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2248 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 5:3 (EPT page walk length - 1) must be 3. */
2249 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 11:7 MBZ. */
2250 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2251 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2252 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2253 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2254
2255 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2256 AssertRC(rc);
2257#endif
2258
2259 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2260 uint64_t u64GuestCr3 = pCtx->cr3;
2261 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2262 || CPUMIsGuestPagingEnabledEx(pCtx))
2263 {
2264 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2265 if (CPUMIsGuestInPAEModeEx(pCtx))
2266 {
2267 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2268 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2269 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2270 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2271 }
2272
2273 /*
2274 * With nested paging, the guest's view of its CR3 remains unblemished: either the
2275 * guest is using paging, or unrestricted guest execution handles the guest while
2276 * it is not using paging.
2277 */
2278 }
2279#ifndef IN_NEM_DARWIN
2280 else
2281 {
2282 /*
2283 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2284 * thinks it accesses physical memory directly, we use our identity-mapped
2285 * page table to map guest-linear to guest-physical addresses. EPT takes care
2286 * of translating it to host-physical addresses.
2287 */
2288 RTGCPHYS GCPhys;
2289 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2290
2291 /* We obtain it here every time as the guest could have relocated this PCI region. */
2292 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2293 if (RT_SUCCESS(rc))
2294 { /* likely */ }
2295 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2296 {
2297 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2298 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2299 }
2300 else
2301 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2302
2303 u64GuestCr3 = GCPhys;
2304 }
2305#endif
2306
2307 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2308 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2309 AssertRC(rc);
2310 }
2311 else
2312 {
2313 Assert(!pVmxTransient->fIsNestedGuest);
2314 /* Non-nested paging case, just use the hypervisor's CR3. */
2315 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2316
2317 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2318 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2319 AssertRC(rc);
2320 }
2321
2322 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2323 }
2324
2325 /*
2326 * Guest CR4.
2327 * ASSUMES this is done every time we get in from ring-3! (XCR0)
2328 */
2329 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2330 {
2331 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2332 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2333
2334 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2335 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
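 /* Illustrative sketch with typical (but not universal) values: CR4 fixed0 is commonly
    just 0x2000 (VMXE) while fixed1 varies with the CPU's feature set, so the masking
    done further below amounts to roughly:

        u64GuestCr4 |= UINT64_C(0x2000);   // CR4.VMXE must remain 1 in VMX operation
        u64GuestCr4 &= fZapCr4;            // unimplemented CR4 bits must remain 0
 */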
2336
2337 /*
2338 * With nested-guests, we may have extended the guest/host mask here (since we
2339 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
2340 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2341 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2342 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2343 */
2344 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2345 uint64_t u64GuestCr4 = pCtx->cr4;
2346 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2347 ? pCtx->cr4
2348 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2349 Assert(!RT_HI_U32(u64GuestCr4));
2350
2351#ifndef IN_NEM_DARWIN
2352 /*
2353 * Setup VT-x's view of the guest CR4.
2354 *
2355 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2356 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2357 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2358 *
2359 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2360 */
2361 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2362 {
2363 Assert(pVM->hm.s.vmx.pRealModeTSS);
2364 Assert(PDMVmmDevHeapIsEnabled(pVM));
2365 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2366 }
2367#endif
2368
2369 if (VM_IS_VMX_NESTED_PAGING(pVM))
2370 {
2371 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2372 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2373 {
2374 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2375 u64GuestCr4 |= X86_CR4_PSE;
2376 /* Our identity mapping is a 32-bit page directory. */
2377 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2378 }
2379 /* else use guest CR4.*/
2380 }
2381 else
2382 {
2383 Assert(!pVmxTransient->fIsNestedGuest);
2384
2385 /*
2386 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2387 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2388 */
2389 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2390 {
2391 case PGMMODE_REAL: /* Real-mode. */
2392 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2393 case PGMMODE_32_BIT: /* 32-bit paging. */
2394 {
2395 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2396 break;
2397 }
2398
2399 case PGMMODE_PAE: /* PAE paging. */
2400 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2401 {
2402 u64GuestCr4 |= X86_CR4_PAE;
2403 break;
2404 }
2405
2406 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2407 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2408 {
2409#ifdef VBOX_WITH_64_BITS_GUESTS
2410 /* For our assumption in vmxHCShouldSwapEferMsr. */
2411 Assert(u64GuestCr4 & X86_CR4_PAE);
2412 break;
2413#endif
2414 }
2415 default:
2416 AssertFailed();
2417 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2418 }
2419 }
2420
2421 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2422 u64GuestCr4 |= fSetCr4;
2423 u64GuestCr4 &= fZapCr4;
2424
2425 Assert(!RT_HI_U32(u64GuestCr4));
2426 Assert(u64GuestCr4 & X86_CR4_VMXE);
2427
2428 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2429 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2430 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2431
2432#ifndef IN_NEM_DARWIN
2433 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2434 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2435 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2436 {
2437 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2438 hmR0VmxUpdateStartVmFunction(pVCpu);
2439 }
2440#endif
2441
2442 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2443
2444 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2445 }
2446 return rc;
2447}
2448
2449
2450#ifdef VBOX_STRICT
2451/**
2452 * Strict function to validate segment registers.
2453 *
2454 * @param pVCpu The cross context virtual CPU structure.
2455 * @param pVmcsInfo The VMCS info. object.
2456 *
2457 * @remarks Will import guest CR0 on strict builds during validation of
2458 * segments.
2459 */
2460static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2461{
2462 /*
2463 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2464 *
2465 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2466 * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2467 * unusable bit and doesn't change the guest-context value.
2468 */
2469 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2470 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2471 vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2472 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2473 && ( !CPUMIsGuestInRealModeEx(pCtx)
2474 && !CPUMIsGuestInV86ModeEx(pCtx)))
2475 {
2476 /* Protected mode checks */
2477 /* CS */
2478 Assert(pCtx->cs.Attr.n.u1Present);
2479 Assert(!(pCtx->cs.Attr.u & 0xf00));
2480 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2481 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2482 || !(pCtx->cs.Attr.n.u1Granularity));
2483 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2484 || (pCtx->cs.Attr.n.u1Granularity));
2485 /* CS cannot be loaded with NULL in protected mode. */
2486 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2487 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2488 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2489 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2490 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2491 else
2492 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2493 /* SS */
2494 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2495 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2496 if ( !(pCtx->cr0 & X86_CR0_PE)
2497 || pCtx->cs.Attr.n.u4Type == 3)
2498 {
2499 Assert(!pCtx->ss.Attr.n.u2Dpl);
2500 }
2501 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2502 {
2503 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2504 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2505 Assert(pCtx->ss.Attr.n.u1Present);
2506 Assert(!(pCtx->ss.Attr.u & 0xf00));
2507 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2508 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2509 || !(pCtx->ss.Attr.n.u1Granularity));
2510 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2511 || (pCtx->ss.Attr.n.u1Granularity));
2512 }
2513 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2514 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2515 {
2516 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2517 Assert(pCtx->ds.Attr.n.u1Present);
2518 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2519 Assert(!(pCtx->ds.Attr.u & 0xf00));
2520 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2521 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2522 || !(pCtx->ds.Attr.n.u1Granularity));
2523 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2524 || (pCtx->ds.Attr.n.u1Granularity));
2525 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2526 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2527 }
2528 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2529 {
2530 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2531 Assert(pCtx->es.Attr.n.u1Present);
2532 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2533 Assert(!(pCtx->es.Attr.u & 0xf00));
2534 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2535 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2536 || !(pCtx->es.Attr.n.u1Granularity));
2537 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2538 || (pCtx->es.Attr.n.u1Granularity));
2539 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2540 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2541 }
2542 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2543 {
2544 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2545 Assert(pCtx->fs.Attr.n.u1Present);
2546 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2547 Assert(!(pCtx->fs.Attr.u & 0xf00));
2548 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2549 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2550 || !(pCtx->fs.Attr.n.u1Granularity));
2551 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2552 || (pCtx->fs.Attr.n.u1Granularity));
2553 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2554 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2555 }
2556 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2557 {
2558 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2559 Assert(pCtx->gs.Attr.n.u1Present);
2560 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2561 Assert(!(pCtx->gs.Attr.u & 0xf00));
2562 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2563 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2564 || !(pCtx->gs.Attr.n.u1Granularity));
2565 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2566 || (pCtx->gs.Attr.n.u1Granularity));
2567 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2568 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2569 }
2570 /* 64-bit capable CPUs. */
2571 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2572 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2573 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2574 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2575 }
2576 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2577 || ( CPUMIsGuestInRealModeEx(pCtx)
2578 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2579 {
2580 /* Real and v86 mode checks. */
2581 /* vmxHCExportGuestSegReg() writes the modified value into the VMCS. We want to check what we're feeding to VT-x. */
2582 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2583#ifndef IN_NEM_DARWIN
2584 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2585 {
2586 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2587 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2588 }
2589 else
2590#endif
2591 {
2592 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2593 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2594 }
2595
2596 /* CS */
2597 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2598 Assert(pCtx->cs.u32Limit == 0xffff);
2599 AssertMsg(u32CSAttr == 0xf3, ("cs=%#x %#x ", pCtx->cs.Sel, u32CSAttr));
2600 /* SS */
2601 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2602 Assert(pCtx->ss.u32Limit == 0xffff);
2603 Assert(u32SSAttr == 0xf3);
2604 /* DS */
2605 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2606 Assert(pCtx->ds.u32Limit == 0xffff);
2607 Assert(u32DSAttr == 0xf3);
2608 /* ES */
2609 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2610 Assert(pCtx->es.u32Limit == 0xffff);
2611 Assert(u32ESAttr == 0xf3);
2612 /* FS */
2613 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2614 Assert(pCtx->fs.u32Limit == 0xffff);
2615 Assert(u32FSAttr == 0xf3);
2616 /* GS */
2617 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2618 Assert(pCtx->gs.u32Limit == 0xffff);
2619 Assert(u32GSAttr == 0xf3);
2620 /* 64-bit capable CPUs. */
2621 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2622 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2623 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2624 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2625 }
2626}
2627#endif /* VBOX_STRICT */
2628
2629
2630/**
2631 * Exports a guest segment register into the guest-state area in the VMCS.
2632 *
2633 * @returns VBox status code.
2634 * @param pVCpu The cross context virtual CPU structure.
2635 * @param pVmcsInfo The VMCS info. object.
2636 * @param iSegReg The segment register number (X86_SREG_XXX).
2637 * @param pSelReg Pointer to the segment selector.
2638 *
2639 * @remarks No-long-jump zone!!!
2640 */
2641static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2642{
2643 Assert(iSegReg < X86_SREG_COUNT);
2644
2645 uint32_t u32Access = pSelReg->Attr.u;
2646#ifndef IN_NEM_DARWIN
2647 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2648#endif
2649 {
2650 /*
2651 * The way to differentiate whether this is really a null selector or just a selector
2652 * loaded with 0 in real-mode is by using the segment attributes. A selector loaded
2653 * in real-mode with the value 0 is valid and usable in protected-mode, so we should
2654 * -not- mark it as an unusable segment. Both the recompiler & VT-x ensure that NULL
2655 * selectors loaded in protected-mode have their attributes set to 0.
2656 */
2657 if (u32Access)
2658 { }
2659 else
2660 u32Access = X86DESCATTR_UNUSABLE;
2661 }
2662#ifndef IN_NEM_DARWIN
2663 else
2664 {
2665 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2666 u32Access = 0xf3;
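 /* 0xf3 decodes as: present (bit 7), DPL=3 (bits 6:5), code/data (bit 4) and type 3,
    i.e. a read/write accessed data segment (bits 3:0) -- the ring-3 attributes that
    virtual-8086 mode execution expects for all six segment registers. */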
2667 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2668 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2669 RT_NOREF_PV(pVCpu);
2670 }
2671#else
2672 RT_NOREF(pVmcsInfo);
2673#endif
2674
2675 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2676 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2677 ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg, pSelReg->Attr.u));
2678
2679 /*
2680 * Commit it to the VMCS.
2681 */
2682 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2683 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2684 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2685 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2686 return VINF_SUCCESS;
2687}
2688
2689
2690/**
2691 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2692 * area in the VMCS.
2693 *
2694 * @returns VBox status code.
2695 * @param pVCpu The cross context virtual CPU structure.
2696 * @param pVmxTransient The VMX-transient structure.
2697 *
2698 * @remarks Will import guest CR0 on strict builds during validation of
2699 * segments.
2700 * @remarks No-long-jump zone!!!
2701 */
2702static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2703{
2704 int rc = VERR_INTERNAL_ERROR_5;
2705#ifndef IN_NEM_DARWIN
2706 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2707#endif
2708 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2709 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2710#ifndef IN_NEM_DARWIN
2711 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2712#endif
2713
2714 /*
2715 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2716 */
2717 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2718 {
2719 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2720 {
2721 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2722#ifndef IN_NEM_DARWIN
2723 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2724 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2725#endif
2726 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2727 AssertRC(rc);
2728 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2729 }
2730
2731 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2732 {
2733 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2734#ifndef IN_NEM_DARWIN
2735 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2736 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2737#endif
2738 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2739 AssertRC(rc);
2740 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2741 }
2742
2743 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2744 {
2745 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2746#ifndef IN_NEM_DARWIN
2747 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2748 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2749#endif
2750 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2751 AssertRC(rc);
2752 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2753 }
2754
2755 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2756 {
2757 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2758#ifndef IN_NEM_DARWIN
2759 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2760 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2761#endif
2762 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2763 AssertRC(rc);
2764 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2765 }
2766
2767 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2768 {
2769 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2770#ifndef IN_NEM_DARWIN
2771 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2772 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2773#endif
2774 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2775 AssertRC(rc);
2776 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2777 }
2778
2779 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2780 {
2781 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2782#ifndef IN_NEM_DARWIN
2783 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2784 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2785#endif
2786 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2787 AssertRC(rc);
2788 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2789 }
2790
2791#ifdef VBOX_STRICT
2792 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2793#endif
2794 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2795 pCtx->cs.Attr.u));
2796 }
2797
2798 /*
2799 * Guest TR.
2800 */
2801 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2802 {
2803 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2804
2805 /*
2806 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2807 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2808 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2809 */
2810 uint16_t u16Sel;
2811 uint32_t u32Limit;
2812 uint64_t u64Base;
2813 uint32_t u32AccessRights;
2814#ifndef IN_NEM_DARWIN
2815 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2816#endif
2817 {
2818 u16Sel = pCtx->tr.Sel;
2819 u32Limit = pCtx->tr.u32Limit;
2820 u64Base = pCtx->tr.u64Base;
2821 u32AccessRights = pCtx->tr.Attr.u;
2822 }
2823#ifndef IN_NEM_DARWIN
2824 else
2825 {
2826 Assert(!pVmxTransient->fIsNestedGuest);
2827 Assert(pVM->hm.s.vmx.pRealModeTSS);
2828 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2829
2830 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2831 RTGCPHYS GCPhys;
2832 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2833 AssertRCReturn(rc, rc);
2834
2835 X86DESCATTR DescAttr;
2836 DescAttr.u = 0;
2837 DescAttr.n.u1Present = 1;
2838 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2839
2840 u16Sel = 0;
2841 u32Limit = HM_VTX_TSS_SIZE;
2842 u64Base = GCPhys;
2843 u32AccessRights = DescAttr.u;
2844 }
2845#endif
2846
2847 /* Validate. */
2848 Assert(!(u16Sel & RT_BIT(2)));
2849 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2850 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2851 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2852 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2853 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2854 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2855 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2856 Assert( (u32Limit & 0xfff) == 0xfff
2857 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2858 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2859 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2860
2861 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2862 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2863 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2864 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2865
2866 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2867 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2868 }
2869
2870 /*
2871 * Guest GDTR.
2872 */
2873 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2874 {
2875 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2876
2877 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2878 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2879
2880 /* Validate. */
2881 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2882
2883 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2884 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2885 }
2886
2887 /*
2888 * Guest LDTR.
2889 */
2890 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2891 {
2892 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2893
2894 /* The unusable bit is specific to VT-x; if it's a null selector, mark it as an unusable segment. */
2895 uint32_t u32Access;
2896 if ( !pVmxTransient->fIsNestedGuest
2897 && !pCtx->ldtr.Attr.u)
2898 u32Access = X86DESCATTR_UNUSABLE;
2899 else
2900 u32Access = pCtx->ldtr.Attr.u;
2901
2902 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2903 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2904 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2905 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2906
2907 /* Validate. */
2908 if (!(u32Access & X86DESCATTR_UNUSABLE))
2909 {
2910 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2911 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2912 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2913 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2914 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2915 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2916 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2917 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2918 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2919 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2920 }
2921
2922 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2923 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2924 }
2925
2926 /*
2927 * Guest IDTR.
2928 */
2929 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2930 {
2931 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2932
2933 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2934 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2935
2936 /* Validate. */
2937 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2938
2939 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2940 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2941 }
2942
2943 return VINF_SUCCESS;
2944}
2945
2946
2947/**
2948 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2949 * VM-exit interruption info type.
2950 *
2951 * @returns The IEM exception flags.
2952 * @param uVector The event vector.
2953 * @param uVmxEventType The VMX event type.
2954 *
2955 * @remarks This function currently only constructs flags required for
2956 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2957 * and CR2 aspects of an exception are not included).
2958 */
2959static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2960{
2961 uint32_t fIemXcptFlags;
2962 switch (uVmxEventType)
2963 {
2964 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2965 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2966 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2967 break;
2968
2969 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2970 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2971 break;
2972
2973 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2974 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2975 break;
2976
2977 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2978 {
2979 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2980 if (uVector == X86_XCPT_BP)
2981 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2982 else if (uVector == X86_XCPT_OF)
2983 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
2984 else
2985 {
2986 fIemXcptFlags = 0;
2987 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
2988 }
2989 break;
2990 }
2991
2992 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
2993 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2994 break;
2995
2996 default:
2997 fIemXcptFlags = 0;
2998 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
2999 break;
3000 }
3001 return fIemXcptFlags;
3002}
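
/* Illustrative sketch (not part of the build): per the switch above, an INT3
   recorded in the IDT-vectoring info as a software exception yields both the
   soft-int and BP-instruction flags, which is the shape of input the remarks
   above say IEMEvaluateRecursiveXcpt consumes:

       uint32_t const fFlags = vmxHCGetIemXcptFlags(X86_XCPT_BP, VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT);
       Assert(fFlags == (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR));
*/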
3003
3004
3005/**
3006 * Sets an event as a pending event to be injected into the guest.
3007 *
3008 * @param pVCpu The cross context virtual CPU structure.
3009 * @param u32IntInfo The VM-entry interruption-information field.
3010 * @param cbInstr The VM-entry instruction length in bytes (for
3011 * software interrupts, exceptions and privileged
3012 * software exceptions).
3013 * @param u32ErrCode The VM-entry exception error code.
3014 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3015 * page-fault.
3016 */
3017DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3018 RTGCUINTPTR GCPtrFaultAddress)
3019{
3020 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3021 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3022 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3023 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3024 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3025 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3026}
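
/* Illustrative only: a caller pending a hardware #PF would build the
   interruption info from the same VMX_BF_ENTRY_INT_INFO_XXX fields the
   helpers below use, passing the error code and the fault address (CR2);
   uErrCode and GCPtrFault are placeholder names here:

       uint32_t const uIntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_PF)
                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
       vmxHCSetPendingEvent(pVCpu, uIntInfo, 0, uErrCode, GCPtrFault);
*/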
3027
3028
3029/**
3030 * Sets an external interrupt as pending-for-injection into the VM.
3031 *
3032 * @param pVCpu The cross context virtual CPU structure.
3033 * @param u8Interrupt The external interrupt vector.
3034 */
3035DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3036{
3037    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, u8Interrupt)
3038 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3039 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3040 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3041 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3042}
3043
3044
3045/**
3046 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3047 *
3048 * @param pVCpu The cross context virtual CPU structure.
3049 */
3050DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3051{
3052 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3053 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3054 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3055 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3056 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3057}
3058
3059
3060/**
3061 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3062 *
3063 * @param pVCpu The cross context virtual CPU structure.
3064 */
3065DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3066{
3067 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3068 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3069 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3070 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3071 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3072}
3073
3074
3075/**
3076 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3077 *
3078 * @param pVCpu The cross context virtual CPU structure.
3079 */
3080DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3081{
3082 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3083 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3084 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3085 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3086 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3087}
3088
3089
3090/**
3091 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3092 *
3093 * @param pVCpu The cross context virtual CPU structure.
3094 */
3095DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3096{
3097 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3098 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3099 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3100 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3101 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3102}
3103
3104
3105#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3106/**
3107 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3108 *
3109 * @param pVCpu The cross context virtual CPU structure.
3110 * @param u32ErrCode The error code for the general-protection exception.
3111 */
3112DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3113{
3114 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3115 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3116 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3117 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3118 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3119}
3120
3121
3122/**
3123 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3124 *
3125 * @param pVCpu The cross context virtual CPU structure.
3126 * @param u32ErrCode The error code for the stack exception.
3127 */
3128DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3129{
3130 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3131 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3132 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3133 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3134 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3135}
3136#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3137
3138
3139/**
3140 * Fixes up attributes for the specified segment register.
3141 *
3142 * @param pVCpu The cross context virtual CPU structure.
3143 * @param pSelReg The segment register that needs fixing.
3144 * @param pszRegName The register name (for logging and assertions).
3145 */
3146static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3147{
3148 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3149
3150 /*
3151 * If VT-x marks the segment as unusable, most other bits remain undefined:
3152 * - For CS the L, D and G bits have meaning.
3153 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3154 * - For the remaining data segments no bits are defined.
3155 *
3156 * The present bit and the unusable bit have been observed to be set at the
3157 * same time (the selector was supposed to be invalid as we started executing
3158 * a V8086 interrupt in ring-0).
3159 *
3160 * What should be important for the rest of the VBox code, is that the P bit is
3161 * cleared. Some of the other VBox code recognizes the unusable bit, but
3162 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3163 * safe side here, we'll strip off P and other bits we don't care about. If
3164 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3165 *
3166 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3167 */
3168#ifdef VBOX_STRICT
3169 uint32_t const uAttr = pSelReg->Attr.u;
3170#endif
3171
3172 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3173 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3174 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3175
3176#ifdef VBOX_STRICT
3177# ifndef IN_NEM_DARWIN
3178 VMMRZCallRing3Disable(pVCpu);
3179# endif
3180 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3181# ifdef DEBUG_bird
3182 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3183 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3184 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3185# endif
3186# ifndef IN_NEM_DARWIN
3187 VMMRZCallRing3Enable(pVCpu);
3188# endif
3189 NOREF(uAttr);
3190#endif
3191 RT_NOREF2(pVCpu, pszRegName);
3192}
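
/* Worked example (illustrative): an unusable ring-0 stack segment saved by
   VT-x with Attr.u = 0x10093 (unusable | P | S | accessed r/w data) leaves
   the masking above as 0x10013: P is stripped while the type, S, DPL and
   unusable bits survive. */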
3193
3194
3195/**
3196 * Imports a guest segment register from the current VMCS into the guest-CPU
3197 * context.
3198 *
3199 * @param pVCpu The cross context virtual CPU structure.
3200 * @tparam a_iSegReg The segment register number (X86_SREG_XXX).
3201 *
3202 * @remarks Called with interrupts and/or preemption disabled.
3203 */
3204template<uint32_t const a_iSegReg>
3205DECLINLINE(void) vmxHCImportGuestSegReg(PVMCPUCC pVCpu)
3206{
3207 AssertCompile(a_iSegReg < X86_SREG_COUNT);
3208    /* Check that the macros we depend upon here and in the exporting parent function work: */
3209#define MY_SEG_VMCS_FIELD(a_FieldPrefix, a_FieldSuff) \
3210 ( a_iSegReg == X86_SREG_ES ? a_FieldPrefix ## ES ## a_FieldSuff \
3211 : a_iSegReg == X86_SREG_CS ? a_FieldPrefix ## CS ## a_FieldSuff \
3212 : a_iSegReg == X86_SREG_SS ? a_FieldPrefix ## SS ## a_FieldSuff \
3213 : a_iSegReg == X86_SREG_DS ? a_FieldPrefix ## DS ## a_FieldSuff \
3214 : a_iSegReg == X86_SREG_FS ? a_FieldPrefix ## FS ## a_FieldSuff \
3215 : a_iSegReg == X86_SREG_GS ? a_FieldPrefix ## GS ## a_FieldSuff : 0)
3216 AssertCompile(VMX_VMCS_GUEST_SEG_BASE(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS_GUEST_,_BASE));
3217 AssertCompile(VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS16_GUEST_,_SEL));
3218 AssertCompile(VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_LIMIT));
3219 AssertCompile(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_ACCESS_RIGHTS));
3220
3221 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[a_iSegReg];
3222
3223 uint16_t u16Sel;
3224 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg), &u16Sel); AssertRC(rc);
3225 pSelReg->Sel = u16Sel;
3226 pSelReg->ValidSel = u16Sel;
3227
3228 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3229 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(a_iSegReg), &pSelReg->u64Base); AssertRC(rc);
3230
3231 uint32_t u32Attr;
3232 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg), &u32Attr); AssertRC(rc);
3233 pSelReg->Attr.u = u32Attr;
3234 if (u32Attr & X86DESCATTR_UNUSABLE)
3235 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + a_iSegReg * 3);
3236
3237 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3238}
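
/* Each instantiation of the template above collapses into straight-line
   VMREADs for one fixed register; the import code below instantiates it per
   register, e.g. vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu) for CS. */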
3239
3240
3241/**
3242 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3243 *
3244 * @param pVCpu The cross context virtual CPU structure.
3245 *
3246 * @remarks Called with interrupts and/or preemption disabled.
3247 */
3248DECLINLINE(void) vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3249{
3250 uint16_t u16Sel;
3251 uint64_t u64Base;
3252 uint32_t u32Limit, u32Attr;
3253 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3254 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3255 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3256 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3257
3258 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3259 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3260 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3261 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3262 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3263 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3264 if (u32Attr & X86DESCATTR_UNUSABLE)
3265 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3266}
3267
3268
3269/**
3270 * Imports the guest TR from the current VMCS into the guest-CPU context.
3271 *
3272 * @param pVCpu The cross context virtual CPU structure.
3273 *
3274 * @remarks Called with interrupts and/or preemption disabled.
3275 */
3276DECLINLINE(void) vmxHCImportGuestTr(PVMCPUCC pVCpu)
3277{
3278 uint16_t u16Sel;
3279 uint64_t u64Base;
3280 uint32_t u32Limit, u32Attr;
3281 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3282 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3283 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3284 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3285
3286 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3287 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3288 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3289 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3290 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3291 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3292 /* TR is the only selector that can never be unusable. */
3293 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3294}
3295
3296
3297/**
3298 * Core: Imports the guest RIP from the VMCS back into the guest-CPU context.
3299 *
3300 * @returns The RIP value.
3301 * @param pVCpu The cross context virtual CPU structure.
3302 *
3303 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3304 * @remarks Do -not- call this function directly!
3305 */
3306DECL_FORCE_INLINE(uint64_t) vmxHCImportGuestCoreRip(PVMCPUCC pVCpu)
3307{
3308 uint64_t u64Val;
3309 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3310 AssertRC(rc);
3311
3312 pVCpu->cpum.GstCtx.rip = u64Val;
3313
3314 return u64Val;
3315}
3316
3317
3318/**
3319 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3320 *
3321 * @param pVCpu The cross context virtual CPU structure.
3322 *
3323 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3324 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3325 * instead!!!
3326 */
3327DECLINLINE(void) vmxHCImportGuestRip(PVMCPUCC pVCpu)
3328{
3329 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
3330 {
3331 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3332 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3333 }
3334}
3335
3336
3337/**
3338 * Core: Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3339 *
3340 * @param pVCpu The cross context virtual CPU structure.
3341 * @param pVmcsInfo The VMCS info. object.
3342 *
3343 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3344 * @remarks Do -not- call this function directly!
3345 */
3346DECL_FORCE_INLINE(void) vmxHCImportGuestCoreRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3347{
3348 uint64_t fRFlags;
3349 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &fRFlags);
3350 AssertRC(rc);
3351
3352 Assert((fRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
3353 Assert((fRFlags & ~(uint64_t)(X86_EFL_1 | X86_EFL_LIVE_MASK)) == 0);
3354
3355 pVCpu->cpum.GstCtx.rflags.u = fRFlags;
3356#ifndef IN_NEM_DARWIN
3357 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3358 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
3359    { /* most likely */ }
3360 else
3361 {
3362 pVCpu->cpum.GstCtx.eflags.Bits.u1VM = 0;
3363 pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3364 }
3365#else
3366 RT_NOREF(pVmcsInfo);
3367#endif
3368}
3369
3370
3371/**
3372 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3373 *
3374 * @param pVCpu The cross context virtual CPU structure.
3375 * @param pVmcsInfo The VMCS info. object.
3376 *
3377 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3378 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3379 * instead!!!
3380 */
3381DECLINLINE(void) vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3382{
3383 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RFLAGS)
3384 {
3385 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3386 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3387 }
3388}
3389
3390
3391/**
3392 * Worker for vmxHCImportGuestIntrState that handles the case where any of the
3393 * relevant VMX_VMCS32_GUEST_INT_STATE bits are set.
3394 */
3395DECL_NO_INLINE(static,void) vmxHCImportGuestIntrStateSlow(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fGstIntState)
3396{
3397 /*
3398 * We must import RIP here to set our EM interrupt-inhibited state.
3399 * We also import RFLAGS as our code that evaluates pending interrupts
3400 * before VM-entry requires it.
3401 */
3402 vmxHCImportGuestRip(pVCpu);
3403 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3404
3405 CPUMUpdateInterruptShadowSsStiEx(&pVCpu->cpum.GstCtx,
3406 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
3407 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
3408 pVCpu->cpum.GstCtx.rip);
3409 CPUMUpdateInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx, RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
3410}
3411
3412
3413/**
3414 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3415 * context.
3416 *
3417 * @note May import RIP and RFLAGS if interrupt or NMI are blocked.
3418 *
3419 * @param pVCpu The cross context virtual CPU structure.
3420 * @param pVmcsInfo The VMCS info. object.
3421 *
3422 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3423 * do not log!
3424 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3425 * instead!!!
3426 */
3427DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3428{
3429 uint32_t u32Val;
3430 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3431 if (!u32Val)
3432 {
3433 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
3434 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
3435 }
3436 else
3437 vmxHCImportGuestIntrStateSlow(pVCpu, pVmcsInfo, u32Val);
3438}
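
/* Illustrative scenario: on a HLT exit immediately following an STI, the
   hardware interruptibility-state has VMX_VMCS_GUEST_INT_STATE_BLOCK_STI
   set, so the slow worker above pulls in RIP/RFLAGS and records an
   interrupt shadow at the current RIP; a zero state takes the cheap path
   and simply clears any stale shadows. */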
3439
3440
3441/**
3442 * Worker for VMXR0ImportStateOnDemand.
3443 *
3444 * @returns VBox status code.
3445 * @param pVCpu The cross context virtual CPU structure.
3446 * @param pVmcsInfo The VMCS info. object.
3447 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3448 */
3449static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3450{
3451 int rc = VINF_SUCCESS;
3452 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3453 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3454 uint32_t u32Val;
3455
3456 /*
3457     * Note! This is a hack to work around a mysterious BSOD observed with release builds
3458 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3459 * neither are other host platforms.
3460 *
3461 * Committing this temporarily as it prevents BSOD.
3462 *
3463 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3464 */
3465#ifdef RT_OS_WINDOWS
3466 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3467 return VERR_HM_IPE_1;
3468#endif
3469
3470 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3471
3472#ifndef IN_NEM_DARWIN
3473 /*
3474 * We disable interrupts to make the updating of the state and in particular
3475     * the fExtrn modification atomic wrt preemption hooks.
3476 */
3477 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3478#endif
3479
3480 fWhat &= pCtx->fExtrn;
3481 if (fWhat)
3482 {
3483 do
3484 {
3485 if (fWhat & CPUMCTX_EXTRN_RIP)
3486 vmxHCImportGuestRip(pVCpu);
3487
3488 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3489 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3490
3491 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3492 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3493 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3494
3495 if (fWhat & CPUMCTX_EXTRN_RSP)
3496 {
3497 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3498 AssertRC(rc);
3499 }
3500
3501 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3502 {
3503 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3504#ifndef IN_NEM_DARWIN
3505 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3506#else
3507 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3508#endif
3509 if (fWhat & CPUMCTX_EXTRN_CS)
3510 {
3511 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3512 vmxHCImportGuestRip(pVCpu); /** @todo WTF? */
3513 if (fRealOnV86Active)
3514 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3515 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3516 }
3517 if (fWhat & CPUMCTX_EXTRN_SS)
3518 {
3519 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3520 if (fRealOnV86Active)
3521 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3522 }
3523 if (fWhat & CPUMCTX_EXTRN_DS)
3524 {
3525 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3526 if (fRealOnV86Active)
3527 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3528 }
3529 if (fWhat & CPUMCTX_EXTRN_ES)
3530 {
3531 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3532 if (fRealOnV86Active)
3533 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3534 }
3535 if (fWhat & CPUMCTX_EXTRN_FS)
3536 {
3537 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3538 if (fRealOnV86Active)
3539 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3540 }
3541 if (fWhat & CPUMCTX_EXTRN_GS)
3542 {
3543 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3544 if (fRealOnV86Active)
3545 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3546 }
3547 }
3548
3549 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3550 {
3551 if (fWhat & CPUMCTX_EXTRN_LDTR)
3552 vmxHCImportGuestLdtr(pVCpu);
3553
3554 if (fWhat & CPUMCTX_EXTRN_GDTR)
3555 {
3556 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3557 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3558 pCtx->gdtr.cbGdt = u32Val;
3559 }
3560
3561 /* Guest IDTR. */
3562 if (fWhat & CPUMCTX_EXTRN_IDTR)
3563 {
3564 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3565 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3566 pCtx->idtr.cbIdt = u32Val;
3567 }
3568
3569 /* Guest TR. */
3570 if (fWhat & CPUMCTX_EXTRN_TR)
3571 {
3572#ifndef IN_NEM_DARWIN
3573 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3574                   so we don't need to import that one. */
3575 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3576#endif
3577 vmxHCImportGuestTr(pVCpu);
3578 }
3579 }
3580
3581 if (fWhat & CPUMCTX_EXTRN_DR7)
3582 {
3583#ifndef IN_NEM_DARWIN
3584 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3585#endif
3586 {
3587 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3588 AssertRC(rc);
3589 }
3590 }
3591
3592 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3593 {
3594 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3595 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3596 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3597 pCtx->SysEnter.cs = u32Val;
3598 }
3599
3600#ifndef IN_NEM_DARWIN
3601 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3602 {
3603 if ( pVM->hmr0.s.fAllow64BitGuests
3604 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3605 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3606 }
3607
3608 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3609 {
3610 if ( pVM->hmr0.s.fAllow64BitGuests
3611 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3612 {
3613 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3614 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3615 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3616 }
3617 }
3618
3619 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3620 {
3621 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3622 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3623 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3624 Assert(pMsrs);
3625 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3626 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
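            /* Each VMXAUTOMSR entry in the MSR-store area is an (MSR index,
               reserved, 64-bit value) triplet; the loop below routes the
               indices we know about to CPUM and the LBR bookkeeping and
               treats anything unexpected as a fatal configuration bug. */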
3627 for (uint32_t i = 0; i < cMsrs; i++)
3628 {
3629 uint32_t const idMsr = pMsrs[i].u32Msr;
3630 switch (idMsr)
3631 {
3632 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3633 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3634 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3635 default:
3636 {
3637 uint32_t idxLbrMsr;
3638 if (VM_IS_VMX_LBR(pVM))
3639 {
3640 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3641 {
3642 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3643 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3644 break;
3645 }
3646 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3647 {
3648                                Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3649 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3650 break;
3651 }
3652 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3653 {
3654 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3655 break;
3656 }
3657 /* Fallthru (no break) */
3658 }
3659 pCtx->fExtrn = 0;
3660                        VCPU_2_VMXSTATE(pVCpu).u32HMError = idMsr;
3661 ASMSetFlags(fEFlags);
3662 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3663 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3664 }
3665 }
3666 }
3667 }
3668#endif
3669
3670 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3671 {
3672 if (fWhat & CPUMCTX_EXTRN_CR0)
3673 {
3674 uint64_t u64Cr0;
3675 uint64_t u64Shadow;
3676 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3677 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3678#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3679 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3680 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3681#else
3682 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3683 {
3684 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3685 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3686 }
3687 else
3688 {
3689 /*
3690 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3691 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3692 * re-construct CR0. See @bugref{9180#c95} for details.
3693 */
3694 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3695 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3696 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3697 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3698 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3699 }
3700#endif
3701#ifndef IN_NEM_DARWIN
3702 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3703#endif
3704 CPUMSetGuestCR0(pVCpu, u64Cr0);
3705#ifndef IN_NEM_DARWIN
3706 VMMRZCallRing3Enable(pVCpu);
3707#endif
3708 }
3709
3710 if (fWhat & CPUMCTX_EXTRN_CR4)
3711 {
3712 uint64_t u64Cr4;
3713 uint64_t u64Shadow;
3714 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3715                rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3716#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3717 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3718 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3719#else
3720 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3721 {
3722 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3723 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3724 }
3725 else
3726 {
3727 /*
3728 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3729 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3730 * re-construct CR4. See @bugref{9180#c95} for details.
3731 */
3732 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3733 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3734 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3735 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3736 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3737 }
3738#endif
3739 pCtx->cr4 = u64Cr4;
3740 }
3741
3742 if (fWhat & CPUMCTX_EXTRN_CR3)
3743 {
3744 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3745 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3746 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3747 && CPUMIsGuestPagingEnabledEx(pCtx)))
3748 {
3749 uint64_t u64Cr3;
3750 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3751 if (pCtx->cr3 != u64Cr3)
3752 {
3753 pCtx->cr3 = u64Cr3;
3754 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3755 }
3756
3757 /*
3758                     * If the guest is in PAE mode, sync back the PDPEs into the guest state.
3759 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3760 */
3761 if (CPUMIsGuestInPAEModeEx(pCtx))
3762 {
3763 X86PDPE aPaePdpes[4];
3764 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3765 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3766 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3767 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3768 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3769 {
3770 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3771 /* PGM now updates PAE PDPTEs while updating CR3. */
3772 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3773 }
3774 }
3775 }
3776 }
3777 }
3778
3779#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3780 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3781 {
3782 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3783 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3784 {
3785 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3786 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3787 if (RT_SUCCESS(rc))
3788 { /* likely */ }
3789 else
3790 break;
3791 }
3792 }
3793#endif
3794 } while (0);
3795
3796 if (RT_SUCCESS(rc))
3797 {
3798 /* Update fExtrn. */
3799 pCtx->fExtrn &= ~fWhat;
3800
3801 /* If everything has been imported, clear the HM keeper bit. */
3802 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3803 {
3804#ifndef IN_NEM_DARWIN
3805 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3806#else
3807 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3808#endif
3809 Assert(!pCtx->fExtrn);
3810 }
3811 }
3812 }
3813#ifndef IN_NEM_DARWIN
3814 else
3815 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3816
3817 /*
3818 * Restore interrupts.
3819 */
3820 ASMSetFlags(fEFlags);
3821#endif
3822
3823 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3824
3825 if (RT_SUCCESS(rc))
3826 { /* likely */ }
3827 else
3828 return rc;
3829
3830 /*
3831 * Honor any pending CR3 updates.
3832 *
3833 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3834 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3835 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3836 *
3837     * The reason for such complicated handling is that VM-exits that call into PGM expect CR3 to be up-to-date and thus
3838 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3839 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3840 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3841 *
3842 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3843 *
3844 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3845 */
3846 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3847#ifndef IN_NEM_DARWIN
3848 && VMMRZCallRing3IsEnabled(pVCpu)
3849#endif
3850 )
3851 {
3852 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3853 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3854 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3855 }
3856
3857 return VINF_SUCCESS;
3858}
3859
3860
3861/**
3862 * Internal state fetcher, inner version where we fetch all of a_fWhat.
3863 *
3864 * @returns VBox status code.
3865 * @param pVCpu The cross context virtual CPU structure.
3866 * @param pVmcsInfo The VMCS info. object.
3867 * @param fEFlags Saved EFLAGS for restoring the interrupt flag. Ignored
3868 * in NEM/darwin context.
3869 * @tparam a_fWhat What to import, zero or more bits from
3870 * HMVMX_CPUMCTX_EXTRN_ALL.
3871 */
3872template<uint64_t const a_fWhat>
3873static int vmxHCImportGuestStateInner(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3874{
3875 Assert(a_fWhat != 0); /* No AssertCompile as the assertion probably kicks in before the compiler (clang) discards it. */
3876 AssertCompile(!(a_fWhat & ~HMVMX_CPUMCTX_EXTRN_ALL));
3877 Assert( (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == a_fWhat
3878 || (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == (a_fWhat & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
3879
3880 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3881
3882 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3883
3884    /* RIP and RFLAGS may have been imported already by the post-exit code
3885       together with the CPUMCTX_EXTRN_INHIBIT_INT/NMI state, in which case
3886       this part of the code has nothing left to do. */
3887 if ( (a_fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3888 && pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3889 {
3890 if (a_fWhat & CPUMCTX_EXTRN_RFLAGS)
3891 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3892
3893 if (a_fWhat & CPUMCTX_EXTRN_RIP)
3894 {
3895 if (!(a_fWhat & CPUMCTX_EXTRN_CS))
3896 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3897 else
3898 vmxHCImportGuestCoreRip(pVCpu);
3899 }
3900 }
3901
3902 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3903 if (a_fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3904 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3905
3906 if (a_fWhat & (CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TR))
3907 {
3908 if (a_fWhat & CPUMCTX_EXTRN_CS)
3909 {
3910 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3911 /** @todo try get rid of this carp, it smells and is probably never ever
3912 * used: */
3913 if ( !(a_fWhat & CPUMCTX_EXTRN_RIP)
3914 && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP))
3915 {
3916 vmxHCImportGuestCoreRip(pVCpu);
3917 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3918 }
3919 EMHistoryUpdatePC(pVCpu, pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, true /* fFlattened */);
3920 }
3921 if (a_fWhat & CPUMCTX_EXTRN_SS)
3922 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3923 if (a_fWhat & CPUMCTX_EXTRN_DS)
3924 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3925 if (a_fWhat & CPUMCTX_EXTRN_ES)
3926 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3927 if (a_fWhat & CPUMCTX_EXTRN_FS)
3928 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3929 if (a_fWhat & CPUMCTX_EXTRN_GS)
3930 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3931
3932 /* Guest TR.
3933 Real-mode emulation using virtual-8086 mode has the fake TSS
3934       (pRealModeTSS) in TR, so we don't need to import that one. */
3935#ifndef IN_NEM_DARWIN
3936 PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmcsInfo->pShared;
3937 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3938 if ((a_fWhat & CPUMCTX_EXTRN_TR) && !fRealOnV86Active)
3939#else
3940 if (a_fWhat & CPUMCTX_EXTRN_TR)
3941#endif
3942 vmxHCImportGuestTr(pVCpu);
3943
3944#ifndef IN_NEM_DARWIN /* NEM/Darwin: HV supports only unrestricted guest execution. */
3945 if (fRealOnV86Active)
3946 {
3947 if (a_fWhat & CPUMCTX_EXTRN_CS)
3948 pVCpu->cpum.GstCtx.cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3949 if (a_fWhat & CPUMCTX_EXTRN_SS)
3950 pVCpu->cpum.GstCtx.ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3951 if (a_fWhat & CPUMCTX_EXTRN_DS)
3952 pVCpu->cpum.GstCtx.ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3953 if (a_fWhat & CPUMCTX_EXTRN_ES)
3954 pVCpu->cpum.GstCtx.es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3955 if (a_fWhat & CPUMCTX_EXTRN_FS)
3956 pVCpu->cpum.GstCtx.fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3957 if (a_fWhat & CPUMCTX_EXTRN_GS)
3958 pVCpu->cpum.GstCtx.gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3959 }
3960#endif
3961 }
3962
3963 if (a_fWhat & CPUMCTX_EXTRN_RSP)
3964 {
3965 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pVCpu->cpum.GstCtx.rsp);
3966 AssertRC(rc);
3967 }
3968
3969 if (a_fWhat & CPUMCTX_EXTRN_LDTR)
3970 vmxHCImportGuestLdtr(pVCpu);
3971
3972 if (a_fWhat & CPUMCTX_EXTRN_GDTR)
3973 {
3974 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pVCpu->cpum.GstCtx.gdtr.pGdt); AssertRC(rc1);
3975 uint32_t u32Val;
3976 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc2);
3977 pVCpu->cpum.GstCtx.gdtr.cbGdt = (uint16_t)u32Val;
3978 }
3979
3980 /* Guest IDTR. */
3981 if (a_fWhat & CPUMCTX_EXTRN_IDTR)
3982 {
3983 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pVCpu->cpum.GstCtx.idtr.pIdt); AssertRC(rc1);
3984 uint32_t u32Val;
3985 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc2);
3986        pVCpu->cpum.GstCtx.idtr.cbIdt = (uint16_t)u32Val;
3987 }
3988
3989 if (a_fWhat & CPUMCTX_EXTRN_DR7)
3990 {
3991#ifndef IN_NEM_DARWIN
3992 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3993#endif
3994 {
3995 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pVCpu->cpum.GstCtx.dr[7]);
3996 AssertRC(rc);
3997 }
3998 }
3999
4000 if (a_fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
4001 {
4002 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pVCpu->cpum.GstCtx.SysEnter.eip); AssertRC(rc1);
4003 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pVCpu->cpum.GstCtx.SysEnter.esp); AssertRC(rc2);
4004 uint32_t u32Val;
4005 int const rc3 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc3);
4006 pVCpu->cpum.GstCtx.SysEnter.cs = u32Val;
4007 }
4008
4009#ifndef IN_NEM_DARWIN
4010 if (a_fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
4011 {
4012 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4013 && pVM->hmr0.s.fAllow64BitGuests)
4014 pVCpu->cpum.GstCtx.msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
4015 }
4016
4017 if (a_fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
4018 {
4019 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4020 && pVM->hmr0.s.fAllow64BitGuests)
4021 {
4022 pVCpu->cpum.GstCtx.msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
4023 pVCpu->cpum.GstCtx.msrSTAR = ASMRdMsr(MSR_K6_STAR);
4024 pVCpu->cpum.GstCtx.msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
4025 }
4026 }
4027
4028 if (a_fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
4029 {
4030 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
4031 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
4032 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
4033 Assert(pMsrs);
4034 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
4035 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
4036 for (uint32_t i = 0; i < cMsrs; i++)
4037 {
4038 uint32_t const idMsr = pMsrs[i].u32Msr;
4039 switch (idMsr)
4040 {
4041 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
4042 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
4043 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
4044 default:
4045 {
4046 uint32_t idxLbrMsr;
4047 if (VM_IS_VMX_LBR(pVM))
4048 {
4049 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
4050 {
4051 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
4052 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
4053 break;
4054 }
4055 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
4056 {
4057                            Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
4058 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
4059 break;
4060 }
4061 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
4062 {
4063 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
4064 break;
4065 }
4066 }
4067 pVCpu->cpum.GstCtx.fExtrn = 0;
4068                    VCPU_2_VMXSTATE(pVCpu).u32HMError = idMsr;
4069 ASMSetFlags(fEFlags);
4070 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
4071 return VERR_HM_UNEXPECTED_LD_ST_MSR;
4072 }
4073 }
4074 }
4075 }
4076#endif
4077
4078 if (a_fWhat & CPUMCTX_EXTRN_CR0)
4079 {
4080 uint64_t u64Cr0;
4081 uint64_t u64Shadow;
4082 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc1);
4083 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc2);
4084#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
4085 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4086 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
4087#else
4088 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4089 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4090 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
4091 else
4092 {
4093 /*
4094 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
4095 * the nested-guest using hardware-assisted VMX. Accordingly we need to
4096 * re-construct CR0. See @bugref{9180#c95} for details.
4097 */
4098 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
4099 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4100 u64Cr0 = (u64Cr0 & ~(pVmcsInfoGst->u64Cr0Mask & pVmcsNstGst->u64Cr0Mask.u))
4101 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
4102 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
4103 Assert(u64Cr0 & X86_CR0_NE);
4104 }
4105#endif
4106#ifndef IN_NEM_DARWIN
4107 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
4108#endif
4109 CPUMSetGuestCR0(pVCpu, u64Cr0);
4110#ifndef IN_NEM_DARWIN
4111 VMMRZCallRing3Enable(pVCpu);
4112#endif
4113 }
4114
4115 if (a_fWhat & CPUMCTX_EXTRN_CR4)
4116 {
4117 uint64_t u64Cr4;
4118 uint64_t u64Shadow;
4119 int rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc1);
4120 int rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc2);
4121#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
4122 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4123 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
4124#else
4125 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4126 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4127 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
4128 else
4129 {
4130 /*
4131 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
4132 * the nested-guest using hardware-assisted VMX. Accordingly we need to
4133 * re-construct CR4. See @bugref{9180#c95} for details.
4134 */
4135 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
4136 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4137 u64Cr4 = (u64Cr4 & ~(pVmcsInfo->u64Cr4Mask & pVmcsNstGst->u64Cr4Mask.u))
4138 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
4139 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
4140 Assert(u64Cr4 & X86_CR4_VMXE);
4141 }
4142#endif
4143 pVCpu->cpum.GstCtx.cr4 = u64Cr4;
4144 }
4145
4146 if (a_fWhat & CPUMCTX_EXTRN_CR3)
4147 {
4148 /* CR0.PG bit changes are always intercepted, so it's up to date. */
4149 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
4150 || ( VM_IS_VMX_NESTED_PAGING(pVM)
4151 && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)))
4152 {
4153 uint64_t u64Cr3;
4154 int const rc0 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc0);
4155 if (pVCpu->cpum.GstCtx.cr3 != u64Cr3)
4156 {
4157 pVCpu->cpum.GstCtx.cr3 = u64Cr3;
4158 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
4159 }
4160
4161 /*
4162             * If the guest is in PAE mode, sync back the PDPEs into the guest state.
4163 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
4164 */
4165 if (CPUMIsGuestInPAEModeEx(&pVCpu->cpum.GstCtx))
4166 {
4167 X86PDPE aPaePdpes[4];
4168 int const rc1 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc1);
4169 int const rc2 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc2);
4170 int const rc3 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc3);
4171 int const rc4 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc4);
4172 if (memcmp(&aPaePdpes[0], &pVCpu->cpum.GstCtx.aPaePdpes[0], sizeof(aPaePdpes)))
4173 {
4174 memcpy(&pVCpu->cpum.GstCtx.aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
4175 /* PGM now updates PAE PDPTEs while updating CR3. */
4176 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
4177 }
4178 }
4179 }
4180 }
4181
4182#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4183 if (a_fWhat & CPUMCTX_EXTRN_HWVIRT)
4184 {
4185 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4186 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4187 {
4188 Assert(CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx));
4189 int const rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
4190 AssertRCReturn(rc, rc);
4191 }
4192 }
4193#endif
4194
4195 /* Update fExtrn. */
4196 pVCpu->cpum.GstCtx.fExtrn &= ~a_fWhat;
4197
4198 /* If everything has been imported, clear the HM keeper bit. */
4199 if (!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
4200 {
4201#ifndef IN_NEM_DARWIN
4202 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
4203#else
4204 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
4205#endif
4206 Assert(!pVCpu->cpum.GstCtx.fExtrn);
4207 }
4208
4209 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
4210
4211 /*
4212 * Honor any pending CR3 updates.
4213 *
4214 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
4215 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
4216 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
4217 *
4218     * The reason for such complicated handling is that VM-exits that call into PGM expect CR3 to be up-to-date and thus
4219 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
4220 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
4221 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
4222 *
4223 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
4224 *
4225 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
4226 */
4227#ifndef IN_NEM_DARWIN
4228 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4229 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu))
4230 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu) )
4231 return VINF_SUCCESS;
4232 ASMSetFlags(fEFlags);
4233#else
4234 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4235 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
4236 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) )
4237 return VINF_SUCCESS;
4238 RT_NOREF_PV(fEFlags);
4239#endif
4240
4241 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
4242 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
4243 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
4244 return VINF_SUCCESS;
4245}
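
/* Note on the fEFlags parameter (summary, not normative): interrupts stay
   disabled for the entire import and the caller normally re-enables them,
   but the two paths above that may call out of this function - the
   unexpected-MSR failure and the PGMUpdateCR3() call - restore the saved
   flags first via ASMSetFlags(). */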
4246
4247
4248/**
4249 * Internal state fetcher.
4250 *
4251 * @returns VBox status code.
4252 * @param pVCpu The cross context virtual CPU structure.
4253 * @param pVmcsInfo The VMCS info. object.
4254 * @param pszCaller For logging.
4255 * @tparam a_fWhat What needs to be imported, CPUMCTX_EXTRN_XXX.
4256 * @tparam a_fDoneLocal What's ASSUMED to have been retrieved locally
4257 * already. This is ORed together with @a a_fWhat when
4258 * calculating what needs fetching (just for safety).
4259 * @tparam a_fDonePostExit    What's ASSUMED to have been retrieved by
4260 * hmR0VmxPostRunGuest()/nemR3DarwinHandleExitCommon()
4261 * already. This is ORed together with @a a_fWhat when
4262 * calculating what needs fetching (just for safety).
4263 */
4264template<uint64_t const a_fWhat,
4265 uint64_t const a_fDoneLocal = 0,
4266 uint64_t const a_fDonePostExit = 0
4267#ifndef IN_NEM_DARWIN
4268 | CPUMCTX_EXTRN_INHIBIT_INT
4269 | CPUMCTX_EXTRN_INHIBIT_NMI
4270# if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
4271 | HMVMX_CPUMCTX_EXTRN_ALL
4272# elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
4273 | CPUMCTX_EXTRN_RFLAGS
4274# endif
4275#else /* IN_NEM_DARWIN */
4276 | CPUMCTX_EXTRN_ALL /** @todo optimize */
4277#endif /* IN_NEM_DARWIN */
4278>
4279DECLINLINE(int) vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, const char *pszCaller)
4280{
4281 RT_NOREF_PV(pszCaller);
4282 if ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL)
4283 {
4284#ifndef IN_NEM_DARWIN
4285 /*
4286 * We disable interrupts to make the updating of the state and in particular
4287         * the fExtrn modification atomic wrt preemption hooks.
4288 */
4289 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
4290#else
4291 RTCCUINTREG const fEFlags = 0;
4292#endif
4293
4294 /*
4295 * We combine all three parameters and take the (probably) inlined optimized
4296 * code path for the new things specified in a_fWhat.
4297 *
4298 * As a tweak to deal with exits that have INHIBIT_INT/NMI active, causing
4299 * vmxHCImportGuestIntrState to automatically fetch both RIP & RFLAGS, we
4300 * also take the streamlined path when both of these are cleared in fExtrn
4301 * already. vmxHCImportGuestStateInner checks fExtrn before fetching. This
4302 * helps with MWAIT and HLT exits that always inhibit IRQs on many platforms.
4303 */
4304 uint64_t const fWhatToDo = pVCpu->cpum.GstCtx.fExtrn
4305 & ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL);
4306 if (RT_LIKELY( ( fWhatToDo == (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit))
4307 || fWhatToDo == ( a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)
4308 & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)) /* fetch with INHIBIT_INT/NMI */))
4309 && (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)) != 0 /* just in case */)
4310 {
4311 int const rc = vmxHCImportGuestStateInner< a_fWhat
4312 & HMVMX_CPUMCTX_EXTRN_ALL
4313 & ~(a_fDoneLocal | a_fDonePostExit)>(pVCpu, pVmcsInfo, fEFlags);
4314#ifndef IN_NEM_DARWIN
4315 ASMSetFlags(fEFlags);
4316#endif
4317 return rc;
4318 }
4319
4320#ifndef IN_NEM_DARWIN
4321 ASMSetFlags(fEFlags);
4322#endif
4323
4324 /*
4325 * We shouldn't normally get here, but it may happen when executing
4326 * in the debug run-loops. Typically, everything should already have
4327 * been fetched then. Otherwise call the fallback state import function.
4328 */
4329 if (fWhatToDo == 0)
4330 { /* hope the cause was the debug loop or something similar */ }
4331 else
4332 {
4333 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestStateFallback);
4334 Log11Func(("a_fWhat=%#RX64/%#RX64/%#RX64 fExtrn=%#RX64 => %#RX64 - Taking inefficient code path from %s!\n",
4335 a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL, a_fDoneLocal & HMVMX_CPUMCTX_EXTRN_ALL,
4336 a_fDonePostExit & HMVMX_CPUMCTX_EXTRN_ALL, pVCpu->cpum.GstCtx.fExtrn, fWhatToDo, pszCaller));
4337 return vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, a_fWhat | a_fDoneLocal | a_fDonePostExit);
4338 }
4339 }
4340 return VINF_SUCCESS;
4341}
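
/* Illustrative call shape (placeholder context, the real exit handlers live
   further down in this file): an exit handler that has already fetched RIP
   locally could pull in CR0 on top of it along the lines of:

       int const rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR0, CPUMCTX_EXTRN_RIP>(pVCpu, pVmcsInfo, __FUNCTION__);
       AssertRCReturn(rc, rc);
*/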
4342
4343
4344/**
4345 * Check per-VM and per-VCPU force flag actions that require us to go back to
4346 * ring-3 for one reason or another.
4347 *
4348 * @returns Strict VBox status code (i.e. informational status codes too)
4349 * @retval VINF_SUCCESS if we don't have any actions that require going back to
4350 * ring-3.
4351 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
4352 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
4353 * interrupts)
4354 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
4355 * all EMTs to be in ring-3.
4356 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
4357 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
4358 * to the EM loop.
4359 *
4360 * @param pVCpu The cross context virtual CPU structure.
4361 * @param   fIsNestedGuest  Flag whether this is for a pending nested-guest event.
4362 * @param fStepping Whether we are single-stepping the guest using the
4363 * hypervisor debugger.
4364 *
4365 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
4366 * is no longer in VMX non-root mode.
4367 */
4368static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
4369{
4370#ifndef IN_NEM_DARWIN
4371 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4372#endif
4373
4374 /*
4375 * Update pending interrupts into the APIC's IRR.
4376 */
4377 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4378 APICUpdatePendingInterrupts(pVCpu);
4379
4380 /*
4381 * Anything pending? Should be more likely than not if we're doing a good job.
4382 */
4383 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4384 if ( !fStepping
4385 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
4386 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
4387 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
4388 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4389 return VINF_SUCCESS;
4390
4391    /* Pending PGM CR3 sync. */
4392    if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
4393 {
4394 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4395 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
4396 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
4397 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
4398 if (rcStrict != VINF_SUCCESS)
4399 {
4400 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
4401 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
4402 return rcStrict;
4403 }
4404 }
4405
4406 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
4407 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
4408 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4409 {
4410 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
4411 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
4412 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
4413 rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
4414 return rc;
4415 }
4416
4417 /* Pending VM request packets, such as hardware interrupts. */
4418 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
4419 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
4420 {
4421 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
4422 Log4Func(("Pending VM request forcing us back to ring-3\n"));
4423 return VINF_EM_PENDING_REQUEST;
4424 }
4425
4426 /* Pending PGM pool flushes. */
4427 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
4428 {
4429 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
4430 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
4431 return VINF_PGM_POOL_FLUSH_PENDING;
4432 }
4433
4434 /* Pending DMA requests. */
4435 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4436 {
4437 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
4438 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4439 return VINF_EM_RAW_TO_R3;
4440 }
4441
4442#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4443 /*
4444 * Pending nested-guest events.
4445 *
4446 * Please note that the priority of these events is specified and important.
4447 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4448 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
4449 */
4450 if (fIsNestedGuest)
4451 {
4452 /* Pending nested-guest APIC-write. */
4453 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4454 {
4455 Log4Func(("Pending nested-guest APIC-write\n"));
4456 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
4457 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4458 return rcStrict;
4459 }
4460
4461 /* Pending nested-guest monitor-trap flag (MTF). */
4462 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
4463 {
4464 Log4Func(("Pending nested-guest MTF\n"));
4465 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
4466 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4467 return rcStrict;
4468 }
4469
4470 /* Pending nested-guest VMX-preemption timer expired. */
4471 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
4472 {
4473 Log4Func(("Pending nested-guest preempt timer\n"));
4474 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
4475 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4476 return rcStrict;
4477 }
4478 }
4479#else
4480 NOREF(fIsNestedGuest);
4481#endif
4482
4483 return VINF_SUCCESS;
4484}
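
/*
 * Minimal usage sketch (illustration only, not part of the template): callers
 * treat anything other than VINF_SUCCESS from vmxHCCheckForceFlags() as a
 * reason to bail out of the inner run-loop, typically along these lines:
 *
 *     VBOXSTRICTRC rcStrict = vmxHCCheckForceFlags(pVCpu, fIsNestedGuest, fStepping);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict; // includes informational codes such as VINF_EM_RAW_TO_R3.
 */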
4485
4486
4487/**
4488 * Converts any TRPM trap into a pending HM event. This is typically used when
4489 * entering from ring-3 (not longjmp returns).
4490 *
4491 * @param pVCpu The cross context virtual CPU structure.
4492 */
4493static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
4494{
4495 Assert(TRPMHasTrap(pVCpu));
4496 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4497
4498 uint8_t uVector;
4499 TRPMEVENT enmTrpmEvent;
4500 uint32_t uErrCode;
4501 RTGCUINTPTR GCPtrFaultAddress;
4502 uint8_t cbInstr;
4503 bool fIcebp;
4504
4505 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4506 AssertRC(rc);
4507
4508 uint32_t u32IntInfo;
4509 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4510 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4511
4512 rc = TRPMResetTrap(pVCpu);
4513 AssertRC(rc);
4514 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4515 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4516
4517 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4518}
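
/*
 * Illustrative sketch (not compiled): the interruption-information word built
 * above uses the standard VMX layout -- vector in bits 7:0, event type in
 * bits 10:8, error-code-valid in bit 11 and the valid bit in bit 31. For a
 * hardware #GP it could be composed explicitly as:
 *
 *     uint32_t const uIntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_GP)
 *                             | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
 *                             | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
 *                             | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
 */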
4519
4520
4521/**
4522 * Converts the pending HM event into a TRPM trap.
4523 *
4524 * @param pVCpu The cross context virtual CPU structure.
4525 */
4526static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4527{
4528 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4529
4530 /* If a trap was already pending, we did something wrong! */
4531 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4532
4533 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4534 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4535 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4536
4537 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4538
4539 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4540 AssertRC(rc);
4541
4542 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4543 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4544
4545 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4546 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4547 else
4548 {
4549 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4550 switch (uVectorType)
4551 {
4552 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4553 TRPMSetTrapDueToIcebp(pVCpu);
4554 RT_FALL_THRU();
4555 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4556 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4557 {
4558 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4559 || ( uVector == X86_XCPT_BP /* INT3 */
4560 || uVector == X86_XCPT_OF /* INTO */
4561 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4562 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4563 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4564 break;
4565 }
4566 }
4567 }
4568
4569 /* We're now done converting the pending event. */
4570 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4571}
4572
4573
4574/**
4575 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4576 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4577 *
4578 * @param pVCpu The cross context virtual CPU structure.
4579 * @param pVmcsInfo The VMCS info. object.
4580 */
4581static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4582{
4583 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4584 {
4585 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4586 {
4587 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4588 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4589 AssertRC(rc);
4590 }
4591 } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4592}
4593
4594
4595/**
4596 * Clears the interrupt-window exiting control in the VMCS.
4597 *
4598 * @param pVCpu The cross context virtual CPU structure.
4599 * @param pVmcsInfo The VMCS info. object.
4600 */
4601DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4602{
4603 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4604 {
4605 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4606 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4607 AssertRC(rc);
4608 }
4609}
4610
4611
4612/**
4613 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4614 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4615 *
4616 * @param pVCpu The cross context virtual CPU structure.
4617 * @param pVmcsInfo The VMCS info. object.
4618 */
4619static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4620{
4621 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4622 {
4623 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4624 {
4625 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4626 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4627 AssertRC(rc);
4628 Log4Func(("Setup NMI-window exiting\n"));
4629 }
4630 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4631}
4632
4633
4634/**
4635 * Clears the NMI-window exiting control in the VMCS.
4636 *
4637 * @param pVCpu The cross context virtual CPU structure.
4638 * @param pVmcsInfo The VMCS info. object.
4639 */
4640DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4641{
4642 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4643 {
4644 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4645 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4646 AssertRC(rc);
4647 }
4648}
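
/*
 * Usage sketch (illustration only): the four window-exiting helpers above are
 * armed when an event is pending but cannot be delivered yet, and disarmed by
 * the corresponding VM-exit handler once the window opens, e.g.:
 *
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
 *         vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo); // NMI-window VM-exit fires
 *                                                      // once NMI blocking ends.
 */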
4649
4650
4651/**
4652 * Injects an event into the guest upon VM-entry by updating the relevant fields
4653 * in the VM-entry area in the VMCS.
4654 *
4655 * @returns Strict VBox status code (i.e. informational status codes too).
4656 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4657 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4658 *
4659 * @param pVCpu The cross context virtual CPU structure.
4660 * @param pVmcsInfo The VMCS info object.
4661 * @param fIsNestedGuest Flag whether this is for a pending nested guest event.
4662 * @param pEvent The event being injected.
4663 * @param fStepping Whether we're single-stepping guest execution and should
4664 * return VINF_EM_DBG_STEPPED if the event is injected
4665 * directly (registers modified by us, not by hardware on
4666 * VM-entry).
4667 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4668 * will be updated if necessary and cannot be NULL.
4669 */
4670static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4671 bool fStepping, uint32_t *pfIntrState)
4672{
4673 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4674 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4675 Assert(pfIntrState);
4676
4677#ifdef IN_NEM_DARWIN
4678 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4679#endif
4680
4681 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4682 uint32_t u32IntInfo = pEvent->u64IntInfo;
4683 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4684 uint32_t const cbInstr = pEvent->cbInstr;
4685 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4686 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4687 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4688
4689#ifdef VBOX_STRICT
4690 /*
4691 * Validate the error-code-valid bit for hardware exceptions.
4692 * No error codes for exceptions in real-mode.
4693 *
4694 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4695 */
4696 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4697 && !CPUMIsGuestInRealModeEx(pCtx))
4698 {
4699 switch (uVector)
4700 {
4701 case X86_XCPT_PF:
4702 case X86_XCPT_DF:
4703 case X86_XCPT_TS:
4704 case X86_XCPT_NP:
4705 case X86_XCPT_SS:
4706 case X86_XCPT_GP:
4707 case X86_XCPT_AC:
4708 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4709 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4710 RT_FALL_THRU();
4711 default:
4712 break;
4713 }
4714 }
4715
4716 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4717 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4718 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4719#endif
4720
4721 RT_NOREF(uVector);
4722 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4723 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4724 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4725 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4726 {
4727 Assert(uVector <= X86_XCPT_LAST);
4728 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4729 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4730 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4731 }
4732 else
4733 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4734
4735 /*
4736 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4737 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4738 * interrupt handler in the (real-mode) guest.
4739 *
4740 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4741 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4742 */
4743 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4744 {
4745#ifndef IN_NEM_DARWIN
4746 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4747#endif
4748 {
4749 /*
4750 * For CPUs with unrestricted guest execution enabled and with the guest
4751 * in real-mode, we must not set the deliver-error-code bit.
4752 *
4753 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4754 */
4755 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4756 }
4757#ifndef IN_NEM_DARWIN
4758 else
4759 {
4760 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4761 Assert(PDMVmmDevHeapIsEnabled(pVM));
4762 Assert(pVM->hm.s.vmx.pRealModeTSS);
4763 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4764
4765 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4766 int rc2 = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4767 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4768 AssertRCReturn(rc2, rc2);
4769
4770 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4771 size_t const cbIdtEntry = sizeof(X86IDTR16);
4772 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4773 {
4774 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4775 if (uVector == X86_XCPT_DF)
4776 return VINF_EM_RESET;
4777
4778 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4779 No error codes for exceptions in real-mode. */
4780 if (uVector == X86_XCPT_GP)
4781 {
4782 static HMEVENT const s_EventXcptDf
4783 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4784 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4785 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4786 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4787 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
4788 }
4789
4790 /*
4791 * If we're injecting an event with no valid IDT entry, inject a #GP.
4792 * No error codes for exceptions in real-mode.
4793 *
4794 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4795 */
4796 static HMEVENT const s_EventXcptGp
4797 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4798 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4799 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4800 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4801 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
4802 }
4803
4804 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4805 uint16_t uGuestIp = pCtx->ip;
4806 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4807 {
4808 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4809 /* #BP and #OF are both benign traps; we need to resume at the next instruction. */
4810 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4811 }
4812 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4813 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4814
4815 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4816 X86IDTR16 IdtEntry;
4817 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4818 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4819 AssertRCReturn(rc2, rc2);
4820
4821 /* Construct the stack frame for the interrupt/exception handler. */
4822 VBOXSTRICTRC rcStrict;
4823 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, (uint16_t)pCtx->eflags.u);
4824 if (rcStrict == VINF_SUCCESS)
4825 {
4826 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4827 if (rcStrict == VINF_SUCCESS)
4828 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4829 }
4830
4831 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4832 if (rcStrict == VINF_SUCCESS)
4833 {
4834 pCtx->eflags.u &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4835 pCtx->rip = IdtEntry.offSel;
4836 pCtx->cs.Sel = IdtEntry.uSel;
4837 pCtx->cs.ValidSel = IdtEntry.uSel;
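 /* Real-mode CS base is Sel << 4; note that cbIdtEntry (sizeof(X86IDTR16)) happens to equal 4 here. */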
4838 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4839 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4840 && uVector == X86_XCPT_PF)
4841 pCtx->cr2 = GCPtrFault;
4842
4843 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4844 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4845 | HM_CHANGED_GUEST_RSP);
4846
4847 /*
4848 * If we delivered a hardware exception (other than an NMI) and if there was
4849 * block-by-STI in effect, we should clear it.
4850 */
4851 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4852 {
4853 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4854 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4855 Log4Func(("Clearing inhibition due to STI\n"));
4856 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4857 }
4858
4859 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4860 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4861
4862 /*
4863 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4864 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4865 */
4866 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4867
4868 /*
4869 * If we eventually support nested-guest execution without unrestricted guest execution,
4870 * we should set fInterceptEvents here.
4871 */
4872 Assert(!fIsNestedGuest);
4873
4874 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4875 if (fStepping)
4876 rcStrict = VINF_EM_DBG_STEPPED;
4877 }
4878 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4879 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4880 return rcStrict;
4881 }
4882#else
4883 RT_NOREF(pVmcsInfo);
4884#endif
4885 }
4886
4887 /*
4888 * Validate.
4889 */
4890 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4891 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4892
4893 /*
4894 * Inject the event into the VMCS.
4895 */
4896 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4897 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4898 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4899 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4900 AssertRC(rc);
4901
4902 /*
4903 * Update guest CR2 if this is a page-fault.
4904 */
4905 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4906 pCtx->cr2 = GCPtrFault;
4907
4908 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4909 return VINF_SUCCESS;
4910}
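
/*
 * Illustrative sketch (not compiled): events normally reach this function via the
 * pending-event mechanism rather than by direct calls. A page-fault, for example,
 * would be queued roughly like this (uErrCode and GCPtrFaultAddress caller-supplied):
 *
 *     vmxHCSetPendingEvent(pVCpu, uIntInfo, 0, uErrCode, GCPtrFaultAddress); // cbInstr=0 for a hw xcpt
 *
 * vmxHCInjectPendingEvent() further below then hands the queued event to this function.
 */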
4911
4912
4913/**
4914 * Evaluates the event to be delivered to the guest and sets it as the pending
4915 * event.
4916 *
4917 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4918 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4919 * NOT restore these force-flags.
4920 *
4921 * @returns Strict VBox status code (i.e. informational status codes too).
4922 * @param pVCpu The cross context virtual CPU structure.
4923 * @param pVmcsInfo The VMCS information structure.
4924 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
4925 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4926 */
4927static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4928{
4929 Assert(pfIntrState);
4930 Assert(!TRPMHasTrap(pVCpu));
4931
4932 /*
4933 * Compute/update guest-interruptibility state related FFs.
4934 * The FFs will be used below while evaluating events to be injected.
4935 */
4936 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4937
4938 /*
4939 * Evaluate if a new event needs to be injected.
4940 * An event that's already pending has already performed all necessary checks.
4941 */
4942 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4943 && !CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
4944 {
4945 /** @todo SMI. SMIs take priority over NMIs. */
4946
4947 /*
4948 * NMIs.
4949 * NMIs take priority over external interrupts.
4950 */
4951#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4952 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4953#endif
4954 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4955 {
4956 /*
4957 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4958 *
4959 * For a nested-guest, the FF always indicates the outer guest's ability to
4960 * receive an NMI while the guest-interruptibility state bit depends on whether
4961 * the nested-hypervisor is using virtual-NMIs.
4962 */
4963 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
4964 {
4965#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4966 if ( fIsNestedGuest
4967 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4968 return IEMExecVmxVmexitXcptNmi(pVCpu);
4969#endif
4970 vmxHCSetPendingXcptNmi(pVCpu);
4971 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4972 Log4Func(("NMI pending injection\n"));
4973
4974 /* We've injected the NMI, bail. */
4975 return VINF_SUCCESS;
4976 }
4977 if (!fIsNestedGuest)
4978 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4979 }
4980
4981 /*
4982 * External interrupts (PIC/APIC).
4983 * Once PDMGetInterrupt() returns a valid interrupt, we -must- deliver it;
4984 * we cannot request the interrupt from the controller again.
4985 */
4986 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4987 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4988 {
4989 Assert(!DBGFIsStepping(pVCpu));
4990 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4991 AssertRC(rc);
4992
4993 /*
4994 * We must not check EFLAGS directly when executing a nested-guest, use
4995 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
4996 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4997 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4998 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4999 *
5000 * See Intel spec. 25.4.1 "Event Blocking".
5001 */
5002 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
5003 {
5004#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5005 if ( fIsNestedGuest
5006 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
5007 {
5008 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
5009 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5010 return rcStrict;
5011 }
5012#endif
5013 uint8_t u8Interrupt;
5014 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
5015 if (RT_SUCCESS(rc))
5016 {
5017#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5018 if ( fIsNestedGuest
5019 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
5020 {
5021 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
5022 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5023 return rcStrict;
5024 }
5025#endif
5026 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
5027 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
5028 }
5029 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
5030 {
5031 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
5032
5033 if ( !fIsNestedGuest
5034 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
5035 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
5036 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
5037
5038 /*
5039 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
5040 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
5041 * need to re-set this force-flag here.
5042 */
5043 }
5044 else
5045 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
5046
5047 /* We've injected the interrupt or taken necessary action, bail. */
5048 return VINF_SUCCESS;
5049 }
5050 if (!fIsNestedGuest)
5051 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
5052 }
5053 }
5054 else if (!fIsNestedGuest)
5055 {
5056 /*
5057 * An event is being injected or we are in an interrupt shadow. Check if another event is
5058 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
5059 * the pending event.
5060 */
5061 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
5062 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
5063 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
5064 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5065 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
5066 }
5067 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
5068
5069 return VINF_SUCCESS;
5070}
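
/*
 * Minimal pre-run sketch (illustration only; real callers add nested-guest and
 * single-stepping handling): the evaluation helper above and the injection
 * helper below are paired so that evaluation queues at most one event, which
 * injection then writes into the VMCS:
 *
 *     uint32_t     fIntrState = 0;
 *     VBOXSTRICTRC rcStrict   = vmxHCEvaluatePendingEvent(pVCpu, pVmcsInfo, false, &fIntrState);
 *     if (rcStrict == VINF_SUCCESS)
 *         rcStrict = vmxHCInjectPendingEvent(pVCpu, pVmcsInfo, false, fIntrState, false);
 */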
5071
5072
5073/**
5074 * Injects any pending events into the guest if the guest is in a state to
5075 * receive them.
5076 *
5077 * @returns Strict VBox status code (i.e. informational status codes too).
5078 * @param pVCpu The cross context virtual CPU structure.
5079 * @param pVmcsInfo The VMCS information structure.
5080 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
5081 * @param fIntrState The VT-x guest-interruptibility state.
5082 * @param fStepping Whether we are single-stepping the guest using the
5083 * hypervisor debugger and should return
5084 * VINF_EM_DBG_STEPPED if the event was dispatched
5085 * directly.
5086 */
5087static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
5088 uint32_t fIntrState, bool fStepping)
5089{
5090 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
5091#ifndef IN_NEM_DARWIN
5092 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5093#endif
5094
5095#ifdef VBOX_STRICT
5096 /*
5097 * Verify guest-interruptibility state.
5098 *
5099 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
5100 * since injecting an event may modify the interruptibility state and we must thus always
5101 * use fIntrState.
5102 */
5103 {
5104 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
5105 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
5106 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
5107 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5108 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet. */
5109 Assert(!TRPMHasTrap(pVCpu));
5110 NOREF(fBlockMovSS); NOREF(fBlockSti);
5111 }
5112#endif
5113
5114 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5115 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5116 {
5117 /*
5118 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
5119 * pending even while injecting an event and in this case, we want a VM-exit as soon as
5120 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
5121 *
5122 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5123 */
5124 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
5125#ifdef VBOX_STRICT
5126 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5127 {
5128 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
5129 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5130 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5131 }
5132 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5133 {
5134 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
5135 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5136 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5137 }
5138#endif
5139 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5140 uIntType));
5141
5142 /*
5143 * Inject the event and get any changes to the guest-interruptibility state.
5144 *
5145 * The guest-interruptibility state may need to be updated if we inject the event
5146 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
5147 */
5148 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
5149 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
5150
5151 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5152 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
5153 else
5154 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
5155 }
5156
5157 /*
5158 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
5159 * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
5160 */
5161 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5162 && !fIsNestedGuest)
5163 {
5164 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
5165
5166 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5167 {
5168 /*
5169 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
5170 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
5171 */
5172 Assert(!DBGFIsStepping(pVCpu));
5173 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_TF);
5174 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
5175 fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
5176 AssertRC(rc);
5177 }
5178 else
5179 {
5180 /*
5181 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
5182 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
5183 * we take care of this case in vmxHCExportSharedDebugState, as well as the case
5184 * where we use MTF, so just make sure it's called before executing guest code.
5185 */
5186 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
5187 }
5188 }
5189 /* else: for nested-guests this is currently handled while merging VMCS controls. */
5190
5191 /*
5192 * Finally, update the guest-interruptibility state.
5193 *
5194 * This is required for the real-on-v86 software interrupt injection, for
5195 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
5196 */
5197 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
5198 AssertRC(rc);
5199
5200 /*
5201 * There's no need to clear the VM-entry interruption-information field here if we're not
5202 * injecting anything. VT-x clears the valid bit on every VM-exit.
5203 *
5204 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
5205 */
5206
5207 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
5208 return rcStrict;
5209}
5210
5211
5212/**
5213 * Tries to determine what part of the guest-state VT-x has deemed invalid
5214 * and updates the error record fields accordingly.
5215 *
5216 * @returns VMX_IGS_* error codes.
5217 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
5218 * wrong with the guest state.
5219 *
5220 * @param pVCpu The cross context virtual CPU structure.
5221 * @param pVmcsInfo The VMCS info. object.
5222 *
5223 * @remarks This function assumes our cache of the VMCS controls
5224 * is valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
5225 */
5226static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
5227{
5228#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
5229#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { uError = (err); break; } else do { } while (0)
5230
5231 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5232 uint32_t uError = VMX_IGS_ERROR;
5233 uint32_t u32IntrState = 0;
5234#ifndef IN_NEM_DARWIN
5235 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5236 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
5237#else
5238 bool const fUnrestrictedGuest = true;
5239#endif
5240 do
5241 {
5242 int rc;
5243
5244 /*
5245 * Guest-interruptibility state.
5246 *
5247 * Read this first so that any check that fails prior to those that actually
5248 * require the guest-interruptibility state still reflects the correct
5249 * VMCS value, avoiding further confusion.
5250 */
5251 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
5252 AssertRC(rc);
5253
5254 uint32_t u32Val;
5255 uint64_t u64Val;
5256
5257 /*
5258 * CR0.
5259 */
5260 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5261 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
5262 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
5263 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
5264 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
5265 if (fUnrestrictedGuest)
5266 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
5267
5268 uint64_t u64GuestCr0;
5269 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
5270 AssertRC(rc);
5271 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
5272 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
5273 if ( !fUnrestrictedGuest
5274 && (u64GuestCr0 & X86_CR0_PG)
5275 && !(u64GuestCr0 & X86_CR0_PE))
5276 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
5277
5278 /*
5279 * CR4.
5280 */
5281 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5282 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
5283 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
5284
5285 uint64_t u64GuestCr4;
5286 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
5287 AssertRC(rc);
5288 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
5289 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
5290
5291 /*
5292 * IA32_DEBUGCTL MSR.
5293 */
5294 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
5295 AssertRC(rc);
5296 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5297 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
5298 {
5299 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
5300 }
5301 uint64_t u64DebugCtlMsr = u64Val;
5302
5303#ifdef VBOX_STRICT
5304 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
5305 AssertRC(rc);
5306 Assert(u32Val == pVmcsInfo->u32EntryCtls);
5307#endif
5308 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5309
5310 /*
5311 * RIP and RFLAGS.
5312 */
5313 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
5314 AssertRC(rc);
5315 /* pCtx->rip can be different from the one in the VMCS (e.g. after running guest code and taking VM-exits that don't update it). */
5316 if ( !fLongModeGuest
5317 || !pCtx->cs.Attr.n.u1Long)
5318 {
5319 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
5320 }
5321 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
5322 * must be identical if the "IA-32e mode guest" VM-entry
5323 * control is 1 and CS.L is 1. No check applies if the
5324 * CPU supports 64 linear-address bits. */
5325
5326 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
5327 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
5328 AssertRC(rc);
5329 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bits 63:22, 15, 5, 3 MBZ. */
5330 VMX_IGS_RFLAGS_RESERVED);
5331 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
5332 uint32_t const u32Eflags = u64Val;
5333
5334 if ( fLongModeGuest
5335 || ( fUnrestrictedGuest
5336 && !(u64GuestCr0 & X86_CR0_PE)))
5337 {
5338 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
5339 }
5340
5341 uint32_t u32EntryInfo;
5342 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
5343 AssertRC(rc);
5344 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5345 {
5346 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
5347 }
5348
5349 /*
5350 * 64-bit checks.
5351 */
5352 if (fLongModeGuest)
5353 {
5354 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
5355 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
5356 }
5357
5358 if ( !fLongModeGuest
5359 && (u64GuestCr4 & X86_CR4_PCIDE))
5360 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
5361
5362 /** @todo CR3 field must be such that bits 63:52 and bits in the range
5363 * 51:32 beyond the processor's physical-address width are 0. */
5364
5365 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5366 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
5367 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
5368
5369#ifndef IN_NEM_DARWIN
5370 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
5371 AssertRC(rc);
5372 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
5373
5374 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
5375 AssertRC(rc);
5376 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
5377#endif
5378
5379 /*
5380 * PERF_GLOBAL MSR.
5381 */
5382 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
5383 {
5384 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
5385 AssertRC(rc);
5386 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
5387 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
5388 }
5389
5390 /*
5391 * PAT MSR.
5392 */
5393 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5394 {
5395 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
5396 AssertRC(rc);
5397 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
5398 for (unsigned i = 0; i < 8; i++)
5399 {
5400 uint8_t u8Val = (u64Val & 0xff);
5401 if ( u8Val != 0 /* UC */
5402 && u8Val != 1 /* WC */
5403 && u8Val != 4 /* WT */
5404 && u8Val != 5 /* WP */
5405 && u8Val != 6 /* WB */
5406 && u8Val != 7 /* UC- */)
5407 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
5408 u64Val >>= 8;
5409 }
5410 }
5411
5412 /*
5413 * EFER MSR.
5414 */
5415 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5416 {
5417 Assert(g_fHmVmxSupportsVmcsEfer);
5418 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
5419 AssertRC(rc);
5420 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
5421 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
5422 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
5423 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
5424 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
5425 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
5426 * iemVmxVmentryCheckGuestState(). */
5427 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5428 || !(u64GuestCr0 & X86_CR0_PG)
5429 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
5430 VMX_IGS_EFER_LMA_LME_MISMATCH);
5431 }
5432
5433 /*
5434 * Segment registers.
5435 */
5436 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5437 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
5438 if (!(u32Eflags & X86_EFL_VM))
5439 {
5440 /* CS */
5441 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
5442 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
5443 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
5444 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5445 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5446 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
5447 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5448 /* CS cannot be loaded with NULL in protected mode. */
5449 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
5450 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
5451 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5452 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
5453 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5454 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
5455 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
5456 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
5457 else
5458 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
5459
5460 /* SS */
5461 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5462 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
5463 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
5464 if ( !(pCtx->cr0 & X86_CR0_PE)
5465 || pCtx->cs.Attr.n.u4Type == 3)
5466 {
5467 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
5468 }
5469
5470 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5471 {
5472 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
5473 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
5474 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
5475 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
5476 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5477 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5478 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
5479 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5480 }
5481
5482 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
5483 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5484 {
5485 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
5486 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
5487 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5488 || pCtx->ds.Attr.n.u4Type > 11
5489 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5490 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
5491 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
5492 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5493 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5494 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
5495 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5496 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5497 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
5498 }
5499 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5500 {
5501 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
5502 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
5503 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5504 || pCtx->es.Attr.n.u4Type > 11
5505 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5506 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5507 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5508 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5509 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5510 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5511 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5512 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5513 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5514 }
5515 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5516 {
5517 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5518 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5519 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5520 || pCtx->fs.Attr.n.u4Type > 11
5521 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5522 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5523 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5524 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5525 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5526 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5527 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5528 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5529 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5530 }
5531 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5532 {
5533 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5534 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5535 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5536 || pCtx->gs.Attr.n.u4Type > 11
5537 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5538 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5539 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5540 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5541 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5542 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5543 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5544 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5545 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5546 }
5547 /* 64-bit capable CPUs. */
5548 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5549 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5550 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5551 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5552 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5553 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5554 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5555 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5556 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5557 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5558 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5559 }
5560 else
5561 {
5562 /* V86 mode checks. */
5563 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5564 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5565 {
5566 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5567 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5568 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5569 }
5570 else
5571 {
5572 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5573 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5574 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5575 }
5576
5577 /* CS */
5578 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5579 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5580 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5581 /* SS */
5582 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5583 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5584 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5585 /* DS */
5586 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5587 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5588 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5589 /* ES */
5590 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5591 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5592 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5593 /* FS */
5594 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5595 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5596 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5597 /* GS */
5598 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5599 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5600 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5601 /* 64-bit capable CPUs. */
5602 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5603 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5604 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5605 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5606 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5607 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5608 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5609 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5610 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5611 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5612 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5613 }
5614
5615 /*
5616 * TR.
5617 */
5618 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5619 /* 64-bit capable CPUs. */
5620 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5621 if (fLongModeGuest)
5622 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5623 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5624 else
5625 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5626 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS. */
5627 VMX_IGS_TR_ATTR_TYPE_INVALID);
5628 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5629 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5630 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5631 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5632 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5633 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5634 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5635 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5636
5637 /*
5638 * GDTR and IDTR (64-bit capable checks).
5639 */
5640 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5641 AssertRC(rc);
5642 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5643
5644 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5645 AssertRC(rc);
5646 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5647
5648 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5649 AssertRC(rc);
5650 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5651
5652 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5653 AssertRC(rc);
5654 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5655
5656 /*
5657 * Guest Non-Register State.
5658 */
5659 /* Activity State. */
5660 uint32_t u32ActivityState;
5661 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5662 AssertRC(rc);
5663 HMVMX_CHECK_BREAK( !u32ActivityState
5664 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5665 VMX_IGS_ACTIVITY_STATE_INVALID);
5666 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5667 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5668
5669 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5670 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5671 {
5672 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5673 }
5674
5675 /** @todo Activity state and injecting interrupts. Left as a todo since we
5676 * currently don't use any activity state other than ACTIVE. */
5677
5678 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5679 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5680
5681 /* Guest interruptibility-state. */
5682 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5683 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5684 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5685 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5686 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5687 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5688 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5689 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5690 {
5691 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5692 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5693 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5694 }
5695 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5696 {
5697 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5698 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5699 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5700 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5701 }
5702 /** @todo Assumes the processor is not in SMM. */
5703 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5704 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5705 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5706 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5707 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5708 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5709 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5710 {
5711 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5712 }
5713
5714 /* Pending debug exceptions. */
5715 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5716 AssertRC(rc);
5717 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5718 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5719 u32Val = u64Val; /* For pending debug exceptions checks below. */
5720
5721 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5722 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5723 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5724 {
5725 if ( (u32Eflags & X86_EFL_TF)
5726 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5727 {
5728 /* Bit 14 is PendingDebug.BS. */
5729 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5730 }
5731 if ( !(u32Eflags & X86_EFL_TF)
5732 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5733 {
5734 /* Bit 14 is PendingDebug.BS. */
5735 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5736 }
5737 }
5738
5739#ifndef IN_NEM_DARWIN
5740 /* VMCS link pointer. */
5741 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5742 AssertRC(rc);
5743 if (u64Val != UINT64_C(0xffffffffffffffff))
5744 {
5745 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5746 /** @todo Bits beyond the processor's physical-address width MBZ. */
5747 /** @todo SMM checks. */
5748 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5749 Assert(pVmcsInfo->pvShadowVmcs);
5750 VMXVMCSREVID VmcsRevId;
5751 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5752 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5753 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5754 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5755 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5756 }
5757
5758 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5759 * not using nested paging? */
5760 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5761 && !fLongModeGuest
5762 && CPUMIsGuestInPAEModeEx(pCtx))
5763 {
5764 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5765 AssertRC(rc);
5766 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5767
5768 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5769 AssertRC(rc);
5770 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5771
5772 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5773 AssertRC(rc);
5774 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5775
5776 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5777 AssertRC(rc);
5778 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5779 }
5780#endif
5781
5782 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5783 if (uError == VMX_IGS_ERROR)
5784 uError = VMX_IGS_REASON_NOT_FOUND;
5785 } while (0);
5786
5787 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5788 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5789 return uError;
5790
5791#undef HMVMX_ERROR_BREAK
5792#undef HMVMX_CHECK_BREAK
5793}
5794
5795
5796#ifndef HMVMX_USE_FUNCTION_TABLE
5797/**
5798 * Handles a guest VM-exit from hardware-assisted VMX execution.
5799 *
5800 * @returns Strict VBox status code (i.e. informational status codes too).
5801 * @param pVCpu The cross context virtual CPU structure.
5802 * @param pVmxTransient The VMX-transient structure.
5803 */
5804DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5805{
5806#ifdef DEBUG_ramshankar
5807# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5808 do { \
5809 if (a_fSave != 0) \
5810 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5811 VBOXSTRICTRC rcStrict = a_CallExpr; \
5812 if (a_fSave != 0) \
5813 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5814 return rcStrict; \
5815 } while (0)
5816#else
5817# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5818#endif
5819 uint32_t const uExitReason = pVmxTransient->uExitReason;
5820 switch (uExitReason)
5821 {
5822 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5823 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5824 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5825 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5826 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5827 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5828 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5829 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5830 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5831 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5832 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5833 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5834 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5835 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5836 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5837 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5838 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5839 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5840 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5841 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5842 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5843 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5844 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5845 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5846 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5847 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5848 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5849 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5850 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5851 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5852#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5853 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5854 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5855 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5856 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5857 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5858 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5859 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5860 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5861 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5862 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5863#else
5864 case VMX_EXIT_VMCLEAR:
5865 case VMX_EXIT_VMLAUNCH:
5866 case VMX_EXIT_VMPTRLD:
5867 case VMX_EXIT_VMPTRST:
5868 case VMX_EXIT_VMREAD:
5869 case VMX_EXIT_VMRESUME:
5870 case VMX_EXIT_VMWRITE:
5871 case VMX_EXIT_VMXOFF:
5872 case VMX_EXIT_VMXON:
5873 case VMX_EXIT_INVVPID:
5874 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5875#endif
5876#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5877 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5878#else
5879 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5880#endif
5881
5882 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5883 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5884 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5885
5886 case VMX_EXIT_INIT_SIGNAL:
5887 case VMX_EXIT_SIPI:
5888 case VMX_EXIT_IO_SMI:
5889 case VMX_EXIT_SMI:
5890 case VMX_EXIT_ERR_MSR_LOAD:
5891 case VMX_EXIT_ERR_MACHINE_CHECK:
5892 case VMX_EXIT_PML_FULL:
5893 case VMX_EXIT_VIRTUALIZED_EOI:
5894 case VMX_EXIT_GDTR_IDTR_ACCESS:
5895 case VMX_EXIT_LDTR_TR_ACCESS:
5896 case VMX_EXIT_APIC_WRITE:
5897 case VMX_EXIT_RDRAND:
5898 case VMX_EXIT_RSM:
5899 case VMX_EXIT_VMFUNC:
5900 case VMX_EXIT_ENCLS:
5901 case VMX_EXIT_RDSEED:
5902 case VMX_EXIT_XSAVES:
5903 case VMX_EXIT_XRSTORS:
5904 case VMX_EXIT_UMWAIT:
5905 case VMX_EXIT_TPAUSE:
5906 case VMX_EXIT_LOADIWKEY:
5907 default:
5908 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5909 }
5910#undef VMEXIT_CALL_RET
5911}
5912#endif /* !HMVMX_USE_FUNCTION_TABLE */
5913
5914
5915#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5916/**
5917 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5918 *
5919 * @returns Strict VBox status code (i.e. informational status codes too).
5920 * @param pVCpu The cross context virtual CPU structure.
5921 * @param pVmxTransient The VMX-transient structure.
5922 */
5923DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5924{
5925 uint32_t const uExitReason = pVmxTransient->uExitReason;
5926 switch (uExitReason)
5927 {
5928# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5929 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient);
5930 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolationNested(pVCpu, pVmxTransient);
5931# else
5932 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
5933 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
5934# endif
5935 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5936 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5937 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5938
5939 /*
5940 * We shouldn't direct host physical interrupts to the nested-guest.
5941 */
5942 case VMX_EXIT_EXT_INT:
5943 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5944
5945 /*
5946 * Instructions that cause VM-exits unconditionally or the condition is
5947 * always taken solely from the nested hypervisor (meaning if the VM-exit
5948 * happens, it's guaranteed to be a nested-guest VM-exit).
5949 *
5950 * - Provides VM-exit instruction length ONLY.
5951 */
5952 case VMX_EXIT_CPUID: /* Unconditional. */
5953 case VMX_EXIT_VMCALL:
5954 case VMX_EXIT_GETSEC:
5955 case VMX_EXIT_INVD:
5956 case VMX_EXIT_XSETBV:
5957 case VMX_EXIT_VMLAUNCH:
5958 case VMX_EXIT_VMRESUME:
5959 case VMX_EXIT_VMXOFF:
5960 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5961 case VMX_EXIT_VMFUNC:
5962 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5963
5964 /*
5965 * Instructions that cause VM-exits unconditionally or the condition is
5966 * always taken solely from the nested hypervisor (meaning if the VM-exit
5967 * happens, it's guaranteed to be a nested-guest VM-exit).
5968 *
5969 * - Provides VM-exit instruction length.
5970 * - Provides VM-exit information.
5971 * - Optionally provides Exit qualification.
5972 *
5973 * Since Exit qualification is 0 for all VM-exits where it is not
5974 * applicable, reading and passing it to the guest should produce
5975 * defined behavior.
5976 *
5977 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5978 */
5979 case VMX_EXIT_INVEPT: /* Unconditional. */
5980 case VMX_EXIT_INVVPID:
5981 case VMX_EXIT_VMCLEAR:
5982 case VMX_EXIT_VMPTRLD:
5983 case VMX_EXIT_VMPTRST:
5984 case VMX_EXIT_VMXON:
5985 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5986 case VMX_EXIT_LDTR_TR_ACCESS:
5987 case VMX_EXIT_RDRAND:
5988 case VMX_EXIT_RDSEED:
5989 case VMX_EXIT_XSAVES:
5990 case VMX_EXIT_XRSTORS:
5991 case VMX_EXIT_UMWAIT:
5992 case VMX_EXIT_TPAUSE:
5993 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5994
5995 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5996 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5997 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5998 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5999 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
6000 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
6001 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
6002 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
6003 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
6004 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
6005 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
6006 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
6007 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
6008 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
6009 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
6010 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
6011 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
6012 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
6013 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
6014
6015 case VMX_EXIT_PREEMPT_TIMER:
6016 {
6017 /** @todo NSTVMX: Preempt timer. */
6018 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
6019 }
6020
6021 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
6022 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
6023
6024 case VMX_EXIT_VMREAD:
6025 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
6026
6027 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
6028 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
6029
6030 case VMX_EXIT_INIT_SIGNAL:
6031 case VMX_EXIT_SIPI:
6032 case VMX_EXIT_IO_SMI:
6033 case VMX_EXIT_SMI:
6034 case VMX_EXIT_ERR_MSR_LOAD:
6035 case VMX_EXIT_ERR_MACHINE_CHECK:
6036 case VMX_EXIT_PML_FULL:
6037 case VMX_EXIT_RSM:
6038 default:
6039 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
6040 }
6041}
6042#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6043
6044
6045/** @name VM-exit helpers.
6046 * @{
6047 */
6048/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6049/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6050/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6051
6052/** Macro for VM-exits called unexpectedly. */
6053#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
6054 do { \
6055 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
6056 return VERR_VMX_UNEXPECTED_EXIT; \
6057 } while (0)
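/* Illustrative usage (a hedged sketch, not a specific call site): an exit handler for a
 reason that can never legitimately occur would do
 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
 which records the reason in u32HMError and fails the exit with VERR_VMX_UNEXPECTED_EXIT. */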
6058
6059#ifdef VBOX_STRICT
6060# ifndef IN_NEM_DARWIN
6061/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
6062# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
6063 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6064
6065# define HMVMX_ASSERT_PREEMPT_CPUID() \
6066 do { \
6067 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6068 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6069 } while (0)
6070
6071# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6072 do { \
6073 AssertPtr((a_pVCpu)); \
6074 AssertPtr((a_pVmxTransient)); \
6075 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6076 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6077 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6078 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6079 Assert((a_pVmxTransient)->pVmcsInfo); \
6080 Assert(ASMIntAreEnabled()); \
6081 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6082 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
6083 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6084 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6085 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
6086 HMVMX_ASSERT_PREEMPT_CPUID(); \
6087 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6088 } while (0)
6089# else
6090# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
6091# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
6092# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6093 do { \
6094 AssertPtr((a_pVCpu)); \
6095 AssertPtr((a_pVmxTransient)); \
6096 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6097 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6098 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6099 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6100 Assert((a_pVmxTransient)->pVmcsInfo); \
6101 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6102 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6103 } while (0)
6104# endif
6105
6106# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6107 do { \
6108 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
6109 Assert((a_pVmxTransient)->fIsNestedGuest); \
6110 } while (0)
6111
6112# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6113 do { \
6114 Log4Func(("\n")); \
6115 } while (0)
6116#else
6117# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6118 do { \
6119 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6120 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
6121 } while (0)
6122
6123# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6124 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
6125
6126# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
6127#endif
6128
6129#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6130/** Macro that performs the necessary privilege checks for VM-exits caused by a
6131 * guest attempting to execute a VMX instruction. */
6132# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
6133 do \
6134 { \
6135 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
6136 if (rcStrictTmp == VINF_SUCCESS) \
6137 { /* likely */ } \
6138 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6139 { \
6140 Assert((a_pVCpu)->hm.s.Event.fPending); \
6141 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
6142 return VINF_SUCCESS; \
6143 } \
6144 else \
6145 { \
6146 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
6147 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
6148 } \
6149 } while (0)
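/* Illustrative usage (a hedged sketch): a handler for an intercepted VMX instruction would
 invoke this first, e.g.
 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
 On VINF_HM_PENDING_XCPT the macro returns VINF_SUCCESS on the handler's behalf so that the
 pending #UD is injected on the next VM-entry instead of emulating the instruction. */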
6150
6151/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
6152# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
6153 do \
6154 { \
6155 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
6156 (a_pGCPtrEffAddr)); \
6157 if (rcStrictTmp == VINF_SUCCESS) \
6158 { /* likely */ } \
6159 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6160 { \
6161 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
6162 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
6163 NOREF(uXcptTmp); \
6164 return VINF_SUCCESS; \
6165 } \
6166 else \
6167 { \
6168 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
6169 return rcStrictTmp; \
6170 } \
6171 } while (0)
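/* Illustrative usage (a hedged sketch; the transient field names are assumptions here):
 RTGCPTR GCPtrEffAddr;
 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual,
 VMXMEMACCESS_WRITE, &GCPtrEffAddr);
 On success GCPtrEffAddr holds the effective guest-linear address of the memory operand. */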
6172#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6173
6174
6175/**
6176 * Advances the guest RIP by the specified number of bytes.
6177 *
6178 * @param pVCpu The cross context virtual CPU structure.
6179 * @param cbInstr Number of bytes to advance the RIP by.
6180 *
6181 * @remarks No-long-jump zone!!!
6182 */
6183DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
6184{
6185 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
6186
6187 /*
6188 * Advance RIP.
6189 *
6190 * The upper 32 bits are only set when in 64-bit mode, so we have to detect
6191 * when the addition causes a "carry" into the upper half and check whether
6192 * we're in 64-bit and can go on with it or whether we should zap the top
6193 * half. (Note! The 8086, 80186 and 80286 emulation is done exclusively in
6194 * IEM, so we don't need to bother with pre-386 16-bit wraparound.)
6195 *
6196 * See PC wrap around tests in bs3-cpu-weird-1.
6197 */
6198 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
6199 uint64_t const uRipNext = uRipPrev + cbInstr;
6200 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & RT_BIT_64(32))
6201 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx)))
6202 pVCpu->cpum.GstCtx.rip = uRipNext;
6203 else
6204 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
6205
6206 /*
6207 * Clear RF and interrupt shadowing.
6208 */
6209 if (RT_LIKELY(!(pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF))))
6210 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_INHIBIT_SHADOW;
6211 else
6212 {
6213 if ((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF)) == X86_EFL_TF)
6214 {
6215 /** @todo \#DB - single step. */
6216 }
6217 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
6218 }
6219 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
6220
6221 /* Mark both RIP and RFLAGS as updated. */
6222 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6223}
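/* Worked example of the carry check above: in 32-bit code, uRipPrev=0xfffffffe with a
 3-byte instruction gives uRipNext=0x100000001; bit 32 differs between the two values and
 the guest is not in 64-bit mode, so RIP is truncated to 0x00000001. */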
6224
6225
6226/**
6227 * Advances the guest RIP after reading it from the VMCS.
6228 *
6229 * @returns VBox status code, no informational status codes.
6230 * @param pVCpu The cross context virtual CPU structure.
6231 * @param pVmxTransient The VMX-transient structure.
6232 *
6233 * @remarks No-long-jump zone!!!
6234 */
6235static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6236{
6237 vmxHCReadToTransientSlow<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
6238 /** @todo consider template here after checking callers. */
6239 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
6240 AssertRCReturn(rc, rc);
6241
6242 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
6243 return VINF_SUCCESS;
6244}
6245
6246
6247/**
6248 * Handle a condition that occurred while delivering an event through the guest or
6249 * nested-guest IDT.
6250 *
6251 * @returns Strict VBox status code (i.e. informational status codes too).
6252 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6253 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
6254 * to continue execution of the guest which will deliver the \#DF.
6255 * @retval VINF_EM_RESET if we detected a triple-fault condition.
6256 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
6257 *
6258 * @param pVCpu The cross context virtual CPU structure.
6259 * @param pVmxTransient The VMX-transient structure.
6260 *
6261 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6262 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
6263 * is due to an EPT violation, PML full or SPP-related event.
6264 *
6265 * @remarks No-long-jump zone!!!
6266 */
6267static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6268{
6269 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6270 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6271 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6272 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6273 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6274 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
6275
6276 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
6277 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6278 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
6279 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
6280 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
6281 {
6282 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
6283 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
6284
6285 /*
6286 * If the event was a software interrupt (generated with INT n) or a software exception
6287 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
6288 * can handle the VM-exit and continue guest execution which will re-execute the
6289 * instruction rather than re-injecting the exception, as that can cause premature
6290 * trips to ring-3 before injection and involve TRPM which currently has no way of
6291 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
6292 * the problem).
6293 */
6294 IEMXCPTRAISE enmRaise;
6295 IEMXCPTRAISEINFO fRaiseInfo;
6296 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6297 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6298 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6299 {
6300 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
6301 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6302 }
6303 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
6304 {
6305 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
6306 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
6307 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
6308
6309 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
6310 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
6311
6312 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
6313
6314 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
6315 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
6316 {
6317 pVmxTransient->fVectoringPF = true;
6318 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6319 }
6320 }
6321 else
6322 {
6323 /*
6324 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
6325 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
6326 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
6327 */
6328 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6329 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6330 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
6331 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6332 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6333 }
6334
6335 /*
6336 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
6337 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
6338 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
6339 * subsequent VM-entry would fail, see @bugref{7445}.
6340 *
6341 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
6342 */
6343 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6344 && enmRaise == IEMXCPTRAISE_PREV_EVENT
6345 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6346 && CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6347 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6348
6349 switch (enmRaise)
6350 {
6351 case IEMXCPTRAISE_CURRENT_XCPT:
6352 {
6353 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
6354 Assert(rcStrict == VINF_SUCCESS);
6355 break;
6356 }
6357
6358 case IEMXCPTRAISE_PREV_EVENT:
6359 {
6360 uint32_t u32ErrCode;
6361 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
6362 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
6363 else
6364 u32ErrCode = 0;
6365
6366 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
6367 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
6368 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
6369 pVCpu->cpum.GstCtx.cr2);
6370
6371 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6372 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
6373 Assert(rcStrict == VINF_SUCCESS);
6374 break;
6375 }
6376
6377 case IEMXCPTRAISE_REEXEC_INSTR:
6378 Assert(rcStrict == VINF_SUCCESS);
6379 break;
6380
6381 case IEMXCPTRAISE_DOUBLE_FAULT:
6382 {
6383 /*
6384 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
6385 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
6386 */
6387 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6388 {
6389 pVmxTransient->fVectoringDoublePF = true;
6390 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6391 pVCpu->cpum.GstCtx.cr2));
6392 rcStrict = VINF_SUCCESS;
6393 }
6394 else
6395 {
6396 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
6397 vmxHCSetPendingXcptDF(pVCpu);
6398 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6399 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6400 rcStrict = VINF_HM_DOUBLE_FAULT;
6401 }
6402 break;
6403 }
6404
6405 case IEMXCPTRAISE_TRIPLE_FAULT:
6406 {
6407 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
6408 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6409 rcStrict = VINF_EM_RESET;
6410 break;
6411 }
6412
6413 case IEMXCPTRAISE_CPU_HANG:
6414 {
6415 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
6416 rcStrict = VERR_EM_GUEST_CPU_HANG;
6417 break;
6418 }
6419
6420 default:
6421 {
6422 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
6423 rcStrict = VERR_VMX_IPE_2;
6424 break;
6425 }
6426 }
6427 }
6428 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6429 && !CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6430 {
6431 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
6432 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
6433 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
6434 {
6435 /*
6436 * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
6437 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6438 * that virtual NMIs remain blocked until the IRET execution is completed.
6439 *
6440 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
6441 */
6442 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6443 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6444 }
6445 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6446 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6447 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6448 {
6449 /*
6450 * Execution of IRET caused an EPT violation, page-modification log-full event or
6451 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
6452 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6453 * that virtual NMIs remain blocked until the IRET execution is completed.
6454 *
6455 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
6456 */
6457 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
6458 {
6459 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6460 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6461 }
6462 }
6463 }
6464
6465 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
6466 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
6467 return rcStrict;
6468}
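/* Example outcomes of the above (a hedged summary): a #PF delivered through the IDT that
 itself raises a #PF yields IEMXCPTRAISE_DOUBLE_FAULT, converted to a pending #DF (or, for
 a vectoring double #PF, VINF_SUCCESS with fVectoringDoublePF set); a fault raised while
 delivering a #DF yields IEMXCPTRAISE_TRIPLE_FAULT and thus VINF_EM_RESET. */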
6469
6470
6471#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6472/**
6473 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
6474 * guest attempting to execute a VMX instruction.
6475 *
6476 * @returns Strict VBox status code (i.e. informational status codes too).
6477 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6478 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6479 *
6480 * @param pVCpu The cross context virtual CPU structure.
6481 * @param uExitReason The VM-exit reason.
6482 *
6483 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
6484 * @remarks No-long-jump zone!!!
6485 */
6486static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
6487{
6488 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6489 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6490
6491 /*
6492 * The physical CPU would have already checked the CPU mode/code segment.
6493 * We shall just assert here for paranoia.
6494 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
6495 */
6496 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6497 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6498 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
6499
6500 if (uExitReason == VMX_EXIT_VMXON)
6501 {
6502 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6503
6504 /*
6505 * We check CR4.VMXE because it is required to be always set while in VMX operation
6506 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
6507 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6508 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6509 */
6510 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6511 {
6512 Log4Func(("CR4.VMXE is not set -> #UD\n"));
6513 vmxHCSetPendingXcptUD(pVCpu);
6514 return VINF_HM_PENDING_XCPT;
6515 }
6516 }
6517 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
6518 {
6519 /*
6520 * The guest has not entered VMX operation but attempted to execute a VMX instruction
6521 * (other than VMXON), we need to raise a #UD.
6522 */
6523 Log4Func(("Not in VMX root mode -> #UD\n"));
6524 vmxHCSetPendingXcptUD(pVCpu);
6525 return VINF_HM_PENDING_XCPT;
6526 }
6527
6528 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
6529 return VINF_SUCCESS;
6530}
6531
6532
6533/**
6534 * Decodes the memory operand of an instruction that caused a VM-exit.
6535 *
6536 * The Exit qualification field provides the displacement field for memory
6537 * operand instructions, if any.
6538 *
6539 * @returns Strict VBox status code (i.e. informational status codes too).
6540 * @retval VINF_SUCCESS if the operand was successfully decoded.
6541 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6542 * operand.
6543 * @param pVCpu The cross context virtual CPU structure.
6544 * @param uExitInstrInfo The VM-exit instruction information field.
6545 * @param GCPtrDisp The instruction displacement field, if any. For
6546 * RIP-relative addressing pass RIP + displacement here.
6547 * @param enmMemAccess The memory operand's access type (read or write).
6548 * @param pGCPtrMem Where to store the effective destination memory address.
6549 *
6550 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6551 * virtual-8086 mode hence skips those checks while verifying if the
6552 * segment is valid.
6553 */
6554static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6555 PRTGCPTR pGCPtrMem)
6556{
6557 Assert(pGCPtrMem);
6558 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6559 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6560 | CPUMCTX_EXTRN_CR0);
6561
6562 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6563 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6564 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6565
6566 VMXEXITINSTRINFO ExitInstrInfo;
6567 ExitInstrInfo.u = uExitInstrInfo;
6568 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6569 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6570 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6571 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6572 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6573 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6574 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6575 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6576 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6577
6578 /*
6579 * Validate instruction information.
6580 * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6581 */
6582 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6583 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6584 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6585 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6586 AssertLogRelMsgReturn(fIsMemOperand,
6587 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6588
6589 /*
6590 * Compute the complete effective address.
6591 *
6592 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6593 * See AMD spec. 4.5.2 "Segment Registers".
6594 */
6595 RTGCPTR GCPtrMem = GCPtrDisp;
6596 if (fBaseRegValid)
6597 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6598 if (fIdxRegValid)
6599 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6600
6601 RTGCPTR const GCPtrOff = GCPtrMem;
6602 if ( !fIsLongMode
6603 || iSegReg >= X86_SREG_FS)
6604 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6605 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
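 /* Illustrative example (hedged): for a 32-bit VMPTRLD [ebx+esi*4+0x10], GCPtrDisp carries
 the 0x10 displacement, so GCPtrMem = 0x10 + ebx + (esi << 2) + ds.base, truncated to
 32 bits by the mask above. */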
6606
6607 /*
6608 * Validate effective address.
6609 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6610 */
6611 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6612 Assert(cbAccess > 0);
6613 if (fIsLongMode)
6614 {
6615 if (X86_IS_CANONICAL(GCPtrMem))
6616 {
6617 *pGCPtrMem = GCPtrMem;
6618 return VINF_SUCCESS;
6619 }
6620
6621 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6622 * "Data Limit Checks in 64-bit Mode". */
6623 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6624 vmxHCSetPendingXcptGP(pVCpu, 0);
6625 return VINF_HM_PENDING_XCPT;
6626 }
6627
6628 /*
6629 * This is a watered down version of iemMemApplySegment().
6630 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6631 * and segment CPL/DPL checks are skipped.
6632 */
6633 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6634 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6635 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6636
6637 /* Check if the segment is present and usable. */
6638 if ( pSel->Attr.n.u1Present
6639 && !pSel->Attr.n.u1Unusable)
6640 {
6641 Assert(pSel->Attr.n.u1DescType);
6642 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6643 {
6644 /* Check permissions for the data segment. */
6645 if ( enmMemAccess == VMXMEMACCESS_WRITE
6646 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6647 {
6648 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6649 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6650 return VINF_HM_PENDING_XCPT;
6651 }
6652
6653 /* Check limits if it's a normal data segment. */
6654 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6655 {
6656 if ( GCPtrFirst32 > pSel->u32Limit
6657 || GCPtrLast32 > pSel->u32Limit)
6658 {
6659 Log4Func(("Data segment limit exceeded. "
6660 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6661 GCPtrLast32, pSel->u32Limit));
6662 if (iSegReg == X86_SREG_SS)
6663 vmxHCSetPendingXcptSS(pVCpu, 0);
6664 else
6665 vmxHCSetPendingXcptGP(pVCpu, 0);
6666 return VINF_HM_PENDING_XCPT;
6667 }
6668 }
6669 else
6670 {
6671 /* Check limits if it's an expand-down data segment.
6672 Note! The upper boundary is defined by the B bit, not the G bit! */
6673 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6674 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6675 {
6676 Log4Func(("Expand-down data segment limit exceeded. "
6677 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6678 GCPtrLast32, pSel->u32Limit));
6679 if (iSegReg == X86_SREG_SS)
6680 vmxHCSetPendingXcptSS(pVCpu, 0);
6681 else
6682 vmxHCSetPendingXcptGP(pVCpu, 0);
6683 return VINF_HM_PENDING_XCPT;
6684 }
6685 }
6686 }
6687 else
6688 {
6689 /* Check permissions for the code segment. */
6690 if ( enmMemAccess == VMXMEMACCESS_WRITE
6691 || ( enmMemAccess == VMXMEMACCESS_READ
6692 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6693 {
6694 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6695 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6696 vmxHCSetPendingXcptGP(pVCpu, 0);
6697 return VINF_HM_PENDING_XCPT;
6698 }
6699
6700 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6701 if ( GCPtrFirst32 > pSel->u32Limit
6702 || GCPtrLast32 > pSel->u32Limit)
6703 {
6704 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6705 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6706 if (iSegReg == X86_SREG_SS)
6707 vmxHCSetPendingXcptSS(pVCpu, 0);
6708 else
6709 vmxHCSetPendingXcptGP(pVCpu, 0);
6710 return VINF_HM_PENDING_XCPT;
6711 }
6712 }
6713 }
6714 else
6715 {
6716 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6717 vmxHCSetPendingXcptGP(pVCpu, 0);
6718 return VINF_HM_PENDING_XCPT;
6719 }
6720
6721 *pGCPtrMem = GCPtrMem;
6722 return VINF_SUCCESS;
6723}
6724#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6725
6726
6727/**
6728 * VM-exit helper for LMSW.
6729 */
6730static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6731{
6732 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6733 AssertRCReturn(rc, rc);
6734
6735 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6736 AssertMsg( rcStrict == VINF_SUCCESS
6737 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6738
6739 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6740 if (rcStrict == VINF_IEM_RAISED_XCPT)
6741 {
6742 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6743 rcStrict = VINF_SUCCESS;
6744 }
6745
6746 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6747 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6748 return rcStrict;
6749}
6750
6751
6752/**
6753 * VM-exit helper for CLTS.
6754 */
6755static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6756{
6757 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6758 AssertRCReturn(rc, rc);
6759
6760 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6761 AssertMsg( rcStrict == VINF_SUCCESS
6762 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6763
6764 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6765 if (rcStrict == VINF_IEM_RAISED_XCPT)
6766 {
6767 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6768 rcStrict = VINF_SUCCESS;
6769 }
6770
6771 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6772 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6773 return rcStrict;
6774}
6775
6776
6777/**
6778 * VM-exit helper for MOV from CRx (CRx read).
6779 */
6780static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6781{
6782 Assert(iCrReg < 16);
6783 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6784
6785 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6786 AssertRCReturn(rc, rc);
6787
6788 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6789 AssertMsg( rcStrict == VINF_SUCCESS
6790 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6791
6792 if (iGReg == X86_GREG_xSP)
6793 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6794 else
6795 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6796#ifdef VBOX_WITH_STATISTICS
6797 switch (iCrReg)
6798 {
6799 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6800 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6801 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6802 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6803 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6804 }
6805#endif
6806 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6807 return rcStrict;
6808}
6809
6810
6811/**
6812 * VM-exit helper for MOV to CRx (CRx write).
6813 */
6814static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6815{
6816 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6817
6818 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6819 AssertMsg( rcStrict == VINF_SUCCESS
6820 || rcStrict == VINF_IEM_RAISED_XCPT
6821 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6822
6823 switch (iCrReg)
6824 {
6825 case 0:
6826 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6827 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6828 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6829 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6830 break;
6831
6832 case 2:
6833 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6834 /* Nothing to do here; CR2 is not part of the VMCS. */
6835 break;
6836
6837 case 3:
6838 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6839 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6840 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6841 break;
6842
6843 case 4:
6844 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6845 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6846#ifndef IN_NEM_DARWIN
6847 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6848 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6849#else
6850 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6851#endif
6852 break;
6853
6854 case 8:
6855 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6856 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6857 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6858 break;
6859
6860 default:
6861 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6862 break;
6863 }
6864
6865 if (rcStrict == VINF_IEM_RAISED_XCPT)
6866 {
6867 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6868 rcStrict = VINF_SUCCESS;
6869 }
6870 return rcStrict;
6871}
6872
6873
6874/**
6875 * VM-exit exception handler for \#PF (Page-fault exception).
6876 *
6877 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6878 */
6879static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6880{
6881 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6882 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
6883
6884#ifndef IN_NEM_DARWIN
6885 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6886 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6887 { /* likely */ }
6888 else
6889#endif
6890 {
6891#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6892 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6893#endif
6894 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6895 if (!pVmxTransient->fVectoringDoublePF)
6896 {
6897 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6898 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6899 }
6900 else
6901 {
6902 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6903 Assert(!pVmxTransient->fIsNestedGuest);
6904 vmxHCSetPendingXcptDF(pVCpu);
6905 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6906 }
6907 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6908 return VINF_SUCCESS;
6909 }
6910
6911 Assert(!pVmxTransient->fIsNestedGuest);
6912
6913 /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
6914 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6915 if (pVmxTransient->fVectoringPF)
6916 {
6917 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6918 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6919 }
6920
6921 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6922 AssertRCReturn(rc, rc);
6923
6924 Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pVCpu->cpum.GstCtx.cs.Sel,
6925 pVCpu->cpum.GstCtx.rip, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pVCpu->cpum.GstCtx.cr3));
6926
6927 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6928 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, &pVCpu->cpum.GstCtx, (RTGCPTR)pVmxTransient->uExitQual);
6929
6930 Log4Func(("#PF: rc=%Rrc\n", rc));
6931 if (rc == VINF_SUCCESS)
6932 {
6933 /*
6934 * This is typically a shadow page table sync or a MMIO instruction. But we may have
6935 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6936 */
6937 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6938 TRPMResetTrap(pVCpu);
6939 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6940 return rc;
6941 }
6942
6943 if (rc == VINF_EM_RAW_GUEST_TRAP)
6944 {
6945 if (!pVmxTransient->fVectoringDoublePF)
6946 {
6947 /* It's a guest page fault and needs to be reflected to the guest. */
6948 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6949 TRPMResetTrap(pVCpu);
6950 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6951 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6952 uGstErrorCode, pVmxTransient->uExitQual);
6953 }
6954 else
6955 {
6956 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6957 TRPMResetTrap(pVCpu);
6958 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6959 vmxHCSetPendingXcptDF(pVCpu);
6960 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6961 }
6962
6963 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6964 return VINF_SUCCESS;
6965 }
6966
6967 TRPMResetTrap(pVCpu);
6968 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6969 return rc;
6970}
6971
6972
6973/**
6974 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6975 *
6976 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6977 */
6978static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6979{
6980 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6981 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6982
6983 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR0>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6984 AssertRCReturn(rc, rc);
6985
6986 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6987 {
6988 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
6989 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6990
6991 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6992 * provides VM-exit instruction length. If this causes problems later,
6993 * disassemble the instruction like it's done on AMD-V. */
6994 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6995 AssertRCReturn(rc2, rc2);
6996 return rc;
6997 }
6998
6999 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
7000 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7001 return VINF_SUCCESS;
7002}
7003
7004
7005/**
7006 * VM-exit exception handler for \#BP (Breakpoint exception).
7007 *
7008 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7009 */
7010static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7011{
7012 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7013 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
7014
7015 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7016 AssertRCReturn(rc, rc);
7017
7018 VBOXSTRICTRC rcStrict;
7019 if (!pVmxTransient->fIsNestedGuest)
7020 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
7021 else
7022 rcStrict = VINF_EM_RAW_GUEST_TRAP;
7023
7024 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
7025 {
7026 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7027 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7028 rcStrict = VINF_SUCCESS;
7029 }
7030
7031 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
7032 return rcStrict;
7033}
7034
7035
7036/**
7037 * VM-exit exception handler for \#AC (Alignment-check exception).
7038 *
7039 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7040 */
7041static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7042{
7043 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7044
7045 /*
7046 * Detect #ACs caused by the host having enabled split-lock detection.
7047 * Emulate such instructions.
7048 */
7049#define VMX_HC_EXIT_XCPT_AC_INITIAL_REGS (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS)
7050 int rc = vmxHCImportGuestState<VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7051 AssertRCReturn(rc, rc);
7052 /** @todo detect split lock in cpu feature? */
7053 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
7054 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7055 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
7056 || CPUMGetGuestCPL(pVCpu) != 3
7057 /* 3. When the EFLAGS.AC != 0 this can only be a split-lock case. */
7058 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
7059 {
7060 /*
7061 * Check for debug/trace events and import state accordingly.
7062 */
7063 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
7064 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7065 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
7066#ifndef IN_NEM_DARWIN
7067 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
7068#endif
7069 )
7070 {
7071 if (pVM->cCpus == 1)
7072 {
7073#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7074 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK,
7075 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7076#else
7077 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7078 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7079#endif
7080 AssertRCReturn(rc, rc);
7081 }
7082 }
7083 else
7084 {
7085 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7086 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7087 AssertRCReturn(rc, rc);
7088
7089             VBOXVMM_VMX_SPLIT_LOCK(pVCpu, &pVCpu->cpum.GstCtx);
7090
7091 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
7092 {
7093 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
7094 if (rcStrict != VINF_SUCCESS)
7095 return rcStrict;
7096 }
7097 }
7098
7099 /*
7100 * Emulate the instruction.
7101 *
7102 * We have to ignore the LOCK prefix here as we must not retrigger the
7103 * detection on the host. This isn't all that satisfactory, though...
7104 */
7105 if (pVM->cCpus == 1)
7106 {
7107 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
7108 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7109
7110 /** @todo For SMP configs we should do a rendezvous here. */
7111 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
7112 if (rcStrict == VINF_SUCCESS)
7113#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7114 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
7115 HM_CHANGED_GUEST_RIP
7116 | HM_CHANGED_GUEST_RFLAGS
7117 | HM_CHANGED_GUEST_GPRS_MASK
7118 | HM_CHANGED_GUEST_CS
7119 | HM_CHANGED_GUEST_SS);
7120#else
7121 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7122#endif
7123 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7124 {
7125 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7126 rcStrict = VINF_SUCCESS;
7127 }
7128 return rcStrict;
7129 }
7130 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
7131 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7132 return VINF_EM_EMULATE_SPLIT_LOCK;
7133 }
7134
7135 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
7136 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7137 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
7138
7139 /* Re-inject it. We'll detect any nesting before getting here. */
7140 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7141 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7142 return VINF_SUCCESS;
7143}
7144
7145
7146/**
7147 * VM-exit exception handler for \#DB (Debug exception).
7148 *
7149 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7150 */
7151static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7152{
7153 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7154 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
7155
7156 /*
7157 * Get the DR6-like values from the Exit qualification and pass it to DBGF for processing.
7158 */
7159 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
7160
7161     /* See Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
7162 uint64_t const uDR6 = X86_DR6_INIT_VAL
7163 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
7164 | X86_DR6_BD | X86_DR6_BS));
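    /* Worked example (illustrative): X86_DR6_INIT_VAL is 0xffff0ff0, so an Exit
       qualification of 0x1 (B0 set) yields uDR6 = 0xffff0ff1, i.e. hardware
       breakpoint 0 was hit. */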
7165 Log6Func(("uDR6=%#RX64 uExitQual=%#RX64\n", uDR6, pVmxTransient->uExitQual));
7166
7167 int rc;
7168 if (!pVmxTransient->fIsNestedGuest)
7169 {
7170 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx, uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7171
7172 /*
7173 * Prevents stepping twice over the same instruction when the guest is stepping using
7174 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
7175 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
7176 */
7177 if ( rc == VINF_EM_DBG_STEPPED
7178 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
7179 {
7180 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7181 rc = VINF_EM_RAW_GUEST_TRAP;
7182 }
7183 }
7184 else
7185 rc = VINF_EM_RAW_GUEST_TRAP;
7186 Log6Func(("rc=%Rrc\n", rc));
7187 if (rc == VINF_EM_RAW_GUEST_TRAP)
7188 {
7189 /*
7190 * The exception was for the guest. Update DR6, DR7.GD and
7191 * IA32_DEBUGCTL.LBR before forwarding it.
7192 * See Intel spec. 27.1 "Architectural State before a VM-Exit"
7193 * and @sdmv3{077,622,17.2.3,Debug Status Register (DR6)}.
7194 */
7195#ifndef IN_NEM_DARWIN
7196 VMMRZCallRing3Disable(pVCpu);
7197 HM_DISABLE_PREEMPT(pVCpu);
7198
7199 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
7200 pVCpu->cpum.GstCtx.dr[6] |= uDR6;
7201 if (CPUMIsGuestDebugStateActive(pVCpu))
7202 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
7203
7204 HM_RESTORE_PREEMPT();
7205 VMMRZCallRing3Enable(pVCpu);
7206#else
7207 /** @todo */
7208#endif
7209
7210 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7211 AssertRCReturn(rc, rc);
7212
7213 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
7214 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_GD;
7215
7216 /* Paranoia. */
7217 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
7218 pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_RA1_MASK;
7219
7220 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pVCpu->cpum.GstCtx.dr[7]);
7221 AssertRC(rc);
7222
7223 /*
7224 * Raise #DB in the guest.
7225 *
7226 * It is important to reflect exactly what the VM-exit gave us (preserving the
7227 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
7228 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
7229 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
7230 *
7231      * Intel re-documented ICEBP/INT1 in May 2018; it was previously only documented
7232      * as part of the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7233 */
7234 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7235 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7236 return VINF_SUCCESS;
7237 }
7238
7239 /*
7240 * Not a guest trap, must be a hypervisor related debug event then.
7241 * Update DR6 in case someone is interested in it.
7242 */
7243 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
7244 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
7245 CPUMSetHyperDR6(pVCpu, uDR6);
7246
7247 return rc;
7248}
7249
7250
7251/**
7252 * Hacks its way around the lovely mesa driver's backdoor accesses.
7253 *
7254 * @sa hmR0SvmHandleMesaDrvGp.
7255 */
7256static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7257{
7258 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
7259 RT_NOREF(pCtx);
7260
7261 /* For now we'll just skip the instruction. */
7262 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7263}
7264
7265
7266/**
7267 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
7268 * backdoor logging w/o checking what it is running inside.
7269 *
7270 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
7271 * backdoor port and magic numbers loaded in registers.
7272 *
7273 * @returns true if it is, false if it isn't.
7274 * @sa hmR0SvmIsMesaDrvGp.
7275 */
7276DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7277{
7278 /* 0xed: IN eAX,dx */
7279 uint8_t abInstr[1];
7280 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
7281 return false;
7282
7283 /* Check that it is #GP(0). */
7284 if (pVmxTransient->uExitIntErrorCode != 0)
7285 return false;
7286
7287 /* Check magic and port. */
7288 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
7289 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
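    /* Note: 0x564d5868 is 'VMXh' (the classic VMware backdoor magic expected in EAX)
       and 0x5658 is 'VX' (the backdoor I/O port expected in DX). */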
7290 if (pCtx->rax != UINT32_C(0x564d5868))
7291 return false;
7292 if (pCtx->dx != UINT32_C(0x5658))
7293 return false;
7294
7295 /* Flat ring-3 CS. */
7296 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
7297 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
7298 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
7299 if (pCtx->cs.Attr.n.u2Dpl != 3)
7300 return false;
7301 if (pCtx->cs.u64Base != 0)
7302 return false;
7303
7304 /* Check opcode. */
7305 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
7306 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
7307 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
7308 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
7309 if (RT_FAILURE(rc))
7310 return false;
7311 if (abInstr[0] != 0xed)
7312 return false;
7313
7314 return true;
7315}
7316
7317
7318/**
7319 * VM-exit exception handler for \#GP (General-protection exception).
7320 *
7321 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7322 */
7323static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7324{
7325 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7326 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
7327
7328 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7329 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7330#ifndef IN_NEM_DARWIN
7331 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
7332 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
7333 { /* likely */ }
7334 else
7335#endif
7336 {
7337#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7338# ifndef IN_NEM_DARWIN
7339 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7340# else
7341 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7342# endif
7343#endif
7344 /*
7345 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
7346 * executing a nested-guest, reflect #GP to the guest or nested-guest.
7347 */
7348 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7349 AssertRCReturn(rc, rc);
7350 Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
7351 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
7352
7353 if ( pVmxTransient->fIsNestedGuest
7354 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
7355 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
7356 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7357 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7358 else
7359 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
7360 return rc;
7361 }
7362
7363#ifndef IN_NEM_DARWIN
7364 Assert(CPUMIsGuestInRealModeEx(pCtx));
7365 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
7366 Assert(!pVmxTransient->fIsNestedGuest);
7367
7368 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7369 AssertRCReturn(rc, rc);
7370
7371 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7372 if (rcStrict == VINF_SUCCESS)
7373 {
7374 if (!CPUMIsGuestInRealModeEx(pCtx))
7375 {
7376 /*
7377 * The guest is no longer in real-mode, check if we can continue executing the
7378 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
7379 */
7380 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
7381 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
7382 {
7383 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
7384 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7385 }
7386 else
7387 {
7388 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
7389 rcStrict = VINF_EM_RESCHEDULE;
7390 }
7391 }
7392 else
7393 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7394 }
7395 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7396 {
7397 rcStrict = VINF_SUCCESS;
7398 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7399 }
7400 return VBOXSTRICTRC_VAL(rcStrict);
7401#endif
7402}
7403
7404
7405/**
7406 * VM-exit exception handler for \#DE (Divide Error).
7407 *
7408 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7409 */
7410static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7411{
7412 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7413 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
7414
7415 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7416 AssertRCReturn(rc, rc);
7417
7418 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7419 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
7420 {
7421 uint8_t cbInstr = 0;
7422 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
7423 if (rc2 == VINF_SUCCESS)
7424 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
7425 else if (rc2 == VERR_NOT_FOUND)
7426 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
7427 else
7428 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
7429 }
7430 else
7431 rcStrict = VINF_SUCCESS; /* Do nothing. */
7432
7433 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
7434 if (RT_FAILURE(rcStrict))
7435 {
7436 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7437 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7438 rcStrict = VINF_SUCCESS;
7439 }
7440
7441 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
7442 return VBOXSTRICTRC_VAL(rcStrict);
7443}
7444
7445
7446/**
7447 * VM-exit exception handler wrapper for all other exceptions that are not handled
7448 * by a specific handler.
7449 *
7450 * This simply re-injects the exception back into the VM without any special
7451 * processing.
7452 *
7453 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7454 */
7455static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7456{
7457 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7458
7459#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7460# ifndef IN_NEM_DARWIN
7461 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7462 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
7463 ("uVector=%#x u32XcptBitmap=%#X32\n",
7464 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
7465 NOREF(pVmcsInfo);
7466# endif
7467#endif
7468
7469 /*
7470 * Re-inject the exception into the guest. This cannot be a double-fault condition which
7471 * would have been handled while checking exits due to event delivery.
7472 */
7473 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7474
7475#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7476 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7477 AssertRCReturn(rc, rc);
7478 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7479#endif
7480
7481#ifdef VBOX_WITH_STATISTICS
7482 switch (uVector)
7483 {
7484 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
7485 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
7486 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
7487 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7488 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
7489 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
7490         case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNM); break;
7491 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
7492 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
7493 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
7494 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
7495 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
7496 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
7497 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
7498 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
7499 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
7500 default:
7501 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
7502 break;
7503 }
7504#endif
7505
7506 /* We should never call this function for a page-fault, we'd need to pass on the fault address below otherwise. */
7507 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
7508 NOREF(uVector);
7509
7510 /* Re-inject the original exception into the guest. */
7511 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7512 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7513 return VINF_SUCCESS;
7514}
7515
7516
7517/**
7518 * VM-exit exception handler for all exceptions (except NMIs!).
7519 *
7520 * @remarks This may be called for both guests and nested-guests. Take care to not
7521 * make assumptions and avoid doing anything that is not relevant when
7522 * executing a nested-guest (e.g., Mesa driver hacks).
7523 */
7524static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7525{
7526 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
7527
7528 /*
7529 * If this VM-exit occurred while delivering an event through the guest IDT, take
7530 * action based on the return code and additional hints (e.g. for page-faults)
7531 * that will be updated in the VMX transient structure.
7532 */
7533 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
7534 if (rcStrict == VINF_SUCCESS)
7535 {
7536 /*
7537 * If an exception caused a VM-exit due to delivery of an event, the original
7538 * event may have to be re-injected into the guest. We shall reinject it and
7539 * continue guest execution. However, page-fault is a complicated case and
7540 * needs additional processing done in vmxHCExitXcptPF().
7541 */
7542 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7543 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7544 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
7545 || uVector == X86_XCPT_PF)
7546 {
7547 switch (uVector)
7548 {
7549 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7550 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7551 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7552 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7553 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7554 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7555 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7556 default:
7557 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7558 }
7559 }
7560 /* else: inject pending event before resuming guest execution. */
7561 }
7562 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7563 {
7564 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7565 rcStrict = VINF_SUCCESS;
7566 }
7567
7568 return rcStrict;
7569}
7570/** @} */
7571
7572
7573/** @name VM-exit handlers.
7574 * @{
7575 */
7576/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7577/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7578/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7579
7580/**
7581 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7582 */
7583HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7584{
7585 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7586 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7587
7588#ifndef IN_NEM_DARWIN
7589 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7590 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7591 return VINF_SUCCESS;
7592 return VINF_EM_RAW_INTERRUPT;
7593#else
7594 return VINF_SUCCESS;
7595#endif
7596}
7597
7598
7599/**
7600 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7601 * VM-exit.
7602 */
7603HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7604{
7605 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7606 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7607
7608 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
7609
7610 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7611 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7612 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7613
7614 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7615 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7616 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7617 NOREF(pVmcsInfo);
7618
7619 VBOXSTRICTRC rcStrict;
7620 switch (uExitIntType)
7621 {
7622#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7623 /*
7624 * Host physical NMIs:
7625 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7626 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7627 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7628 *
7629 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7630 * See Intel spec. 27.5.5 "Updating Non-Register State".
7631 */
7632 case VMX_EXIT_INT_INFO_TYPE_NMI:
7633 {
7634 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7635 break;
7636 }
7637#endif
7638
7639 /*
7640 * Privileged software exceptions (#DB from ICEBP),
7641 * Software exceptions (#BP and #OF),
7642 * Hardware exceptions:
7643 * Process the required exceptions and resume guest execution if possible.
7644 */
7645 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7646 Assert(uVector == X86_XCPT_DB);
7647 RT_FALL_THRU();
7648 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7649 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7650 RT_FALL_THRU();
7651 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7652 {
7653 NOREF(uVector);
7654 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
7655 | HMVMX_READ_EXIT_INSTR_LEN
7656 | HMVMX_READ_IDT_VECTORING_INFO
7657 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
7658 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7659 break;
7660 }
7661
7662 default:
7663 {
7664 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7665 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7666 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7667 break;
7668 }
7669 }
7670
7671 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7672 return rcStrict;
7673}
7674
7675
7676/**
7677 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7678 */
7679HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7680{
7681 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7682
7683     /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7684 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7685 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7686
7687 /* Evaluate and deliver pending events and resume guest execution. */
7688 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7689 return VINF_SUCCESS;
7690}
7691
7692
7693/**
7694 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7695 */
7696HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7697{
7698 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7699
7700 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7701 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7702 {
7703 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7704 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7705 }
7706
7707 Assert(!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx));
7708
7709 /*
7710 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7711 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7712 */
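    /* Illustration: the interruptibility state is a bitmask (bit 0 = block-by-STI,
       bit 1 = block-by-MOV-SS); e.g. a raw value of 0x1 read below means only STI
       blocking is active, and it is masked off before we deliver the NMI ourselves. */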
7713 uint32_t fIntrState;
7714 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7715 AssertRC(rc);
7716 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7717 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7718 {
7719 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
7720
7721 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7722 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7723 AssertRC(rc);
7724 }
7725
7726     /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7727 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7728
7729 /* Evaluate and deliver pending events and resume guest execution. */
7730 return VINF_SUCCESS;
7731}
7732
7733
7734/**
7735 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7736 */
7737HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7738{
7739 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7740 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7741}
7742
7743
7744/**
7745 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7746 */
7747HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7748{
7749 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7750 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7751}
7752
7753
7754/**
7755 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7756 */
7757HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7758{
7759 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7760
7761 /*
7762 * Get the state we need and update the exit history entry.
7763 */
7764 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7765 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7766 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7767 AssertRCReturn(rc, rc);
7768
7769 VBOXSTRICTRC rcStrict;
7770 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7771 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7772 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7773 if (!pExitRec)
7774 {
7775 /*
7776 * Regular CPUID instruction execution.
7777 */
7778 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7779 if (rcStrict == VINF_SUCCESS)
7780 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7781 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7782 {
7783 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7784 rcStrict = VINF_SUCCESS;
7785 }
7786 }
7787 else
7788 {
7789 /*
7790 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7791 */
7792 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7793 IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7794 AssertRCReturn(rc2, rc2);
7795
7796 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7797 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7798
7799 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7800 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7801
7802 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7803 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7804 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7805 }
7806 return rcStrict;
7807}
7808
7809
7810/**
7811 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7812 */
7813HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7814{
7815 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7816
7817 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7818 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7819 AssertRCReturn(rc, rc);
7820
7821 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7822 return VINF_EM_RAW_EMULATE_INSTR;
7823
7824 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7825 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7826}
7827
7828
7829/**
7830 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7831 */
7832HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7833{
7834 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7835
7836 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7837 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7838 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7839 AssertRCReturn(rc, rc);
7840
7841 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7842 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7843 {
7844 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7845 we must reset offsetting on VM-entry. See @bugref{6634}. */
7846 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7847 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7848 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7849 }
7850 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7851 {
7852 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7853 rcStrict = VINF_SUCCESS;
7854 }
7855 return rcStrict;
7856}
7857
7858
7859/**
7860 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7861 */
7862HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7863{
7864 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7865
7866 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7867 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7868 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX>(pVCpu, pVmcsInfo, __FUNCTION__);
7869 AssertRCReturn(rc, rc);
7870
7871 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7872 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7873 {
7874 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7875 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7876 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7877 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7878 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7879 }
7880 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7881 {
7882 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7883 rcStrict = VINF_SUCCESS;
7884 }
7885 return rcStrict;
7886}
7887
7888
7889/**
7890 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7891 */
7892HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7893{
7894 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7895
7896 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7897 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7898 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7899 AssertRCReturn(rc, rc);
7900
7901 VBOXSTRICTRC rcStrict = IEMExecDecodedRdpmc(pVCpu, pVmxTransient->cbExitInstr);
7902 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7903 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7904 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7905 {
7906 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7907 rcStrict = VINF_SUCCESS;
7908 }
7909 return rcStrict;
7910}
7911
7912
7913/**
7914 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7915 */
7916HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7917{
7918 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7919
7920 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7921 if (EMAreHypercallInstructionsEnabled(pVCpu))
7922 {
7923 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7924 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RIP
7925 | CPUMCTX_EXTRN_RFLAGS
7926 | CPUMCTX_EXTRN_CR0
7927 | CPUMCTX_EXTRN_SS
7928 | CPUMCTX_EXTRN_CS
7929 | CPUMCTX_EXTRN_EFER>(pVCpu, pVmcsInfo, __FUNCTION__);
7930 AssertRCReturn(rc, rc);
7931
7932 /* Perform the hypercall. */
7933 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7934 if (rcStrict == VINF_SUCCESS)
7935 {
7936 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7937 AssertRCReturn(rc, rc);
7938 }
7939 else
7940 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7941 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7942 || RT_FAILURE(rcStrict));
7943
7944         /* If the hypercall changes anything other than the guest's general-purpose registers,
7945            we would need to reload the guest-changed bits here before VM-entry. */
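        /* A sketch (not enabled) of the conservatively safe variant of such a reload,
           reusing the fCtxChanged mechanism employed throughout this file: */
#if 0
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
#endif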
7946 }
7947 else
7948 Log4Func(("Hypercalls not enabled\n"));
7949
7950 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7951 if (RT_FAILURE(rcStrict))
7952 {
7953 vmxHCSetPendingXcptUD(pVCpu);
7954 rcStrict = VINF_SUCCESS;
7955 }
7956
7957 return rcStrict;
7958}
7959
7960
7961/**
7962 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7963 */
7964HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7965{
7966 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7967#ifndef IN_NEM_DARWIN
7968 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7969#endif
7970
7971 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7972 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
7973 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7974 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7975 AssertRCReturn(rc, rc);
7976
7977 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7978
7979 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7980 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7981 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7982 {
7983 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7984 rcStrict = VINF_SUCCESS;
7985 }
7986 else
7987 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7988 VBOXSTRICTRC_VAL(rcStrict)));
7989 return rcStrict;
7990}
7991
7992
7993/**
7994 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7995 */
7996HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7997{
7998 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7999
8000 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8001 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8002 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS>(pVCpu, pVmcsInfo, __FUNCTION__);
8003 AssertRCReturn(rc, rc);
8004
8005 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
8006 if (rcStrict == VINF_SUCCESS)
8007 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8008 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8009 {
8010 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8011 rcStrict = VINF_SUCCESS;
8012 }
8013
8014 return rcStrict;
8015}
8016
8017
8018/**
8019 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
8020 */
8021HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8022{
8023 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8024
8025 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8026 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8027 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8028 AssertRCReturn(rc, rc);
8029
8030 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
8031 if (RT_SUCCESS(rcStrict))
8032 {
8033 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8034 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
8035 rcStrict = VINF_SUCCESS;
8036 }
8037
8038 return rcStrict;
8039}
8040
8041
8042/**
8043 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
8044 * VM-exit.
8045 */
8046HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8047{
8048 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8049 return VINF_EM_RESET;
8050}
8051
8052
8053/**
8054 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
8055 */
8056HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8057{
8058 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8059
8060 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8061 AssertRCReturn(rc, rc);
8062
8063 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
8064 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
8065 rc = VINF_SUCCESS;
8066 else
8067 rc = VINF_EM_HALT;
8068
8069 if (rc != VINF_SUCCESS)
8070 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
8071 return rc;
8072}
8073
8074
8075#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8076/**
8077 * VM-exit handler for instructions that result in a \#UD exception delivered to
8078 * the guest.
8079 */
8080HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8081{
8082 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8083 vmxHCSetPendingXcptUD(pVCpu);
8084 return VINF_SUCCESS;
8085}
8086#endif
8087
8088
8089/**
8090 * VM-exit handler for expiry of the VMX-preemption timer.
8091 */
8092HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8093{
8094 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8095
8096 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
8097 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8098    Log12(("vmxHCExitPreemptTimer:\n"));
8099
8100 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
8101 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8102 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
8103 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
8104 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
8105}
8106
8107
8108/**
8109 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
8110 */
8111HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8112{
8113 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8114
8115 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8116 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8117 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
8118 AssertRCReturn(rc, rc);
8119
8120 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
8121 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
8122 : HM_CHANGED_RAISED_XCPT_MASK);
8123
8124#ifndef IN_NEM_DARWIN
8125 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
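    /* If the guest runs with a non-host XCR0 while CR4.OSXSAVE is set, the inner
       VM-entry/exit path has to swap XCR0 around guest execution; whenever that
       requirement flips we re-select the start-VM function accordingly. */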
8126 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
8127 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
8128 {
8129 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
8130 hmR0VmxUpdateStartVmFunction(pVCpu);
8131 }
8132#endif
8133
8134 return rcStrict;
8135}
8136
8137
8138/**
8139 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
8140 */
8141HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8142{
8143 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8144
8145     /** @todo Enable the new code after finding a reliable guest test-case. */
8146#if 1
8147 return VERR_EM_INTERPRETER;
8148#else
8149 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8150 | HMVMX_READ_EXIT_INSTR_INFO
8151 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8152 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8153 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8154 AssertRCReturn(rc, rc);
8155
8156 /* Paranoia. Ensure this has a memory operand. */
8157 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
8158
8159 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
8160 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
8161 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
8162 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
8163
8164 RTGCPTR GCPtrDesc;
8165 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
8166
8167 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
8168 GCPtrDesc, uType);
8169 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8170 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8171 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8172 {
8173 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8174 rcStrict = VINF_SUCCESS;
8175 }
8176 return rcStrict;
8177#endif
8178}
8179
8180
8181/**
8182 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
8183 * VM-exit.
8184 */
8185HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8186{
8187 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8188 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8189 AssertRCReturn(rc, rc);
8190
8191 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
8192 if (RT_FAILURE(rc))
8193 return rc;
8194
8195 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
8196 NOREF(uInvalidReason);
8197
8198#ifdef VBOX_STRICT
8199 uint32_t fIntrState;
8200 uint64_t u64Val;
8201 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_INFO
8202 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8203 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
8204
8205 Log4(("uInvalidReason %u\n", uInvalidReason));
8206 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
8207 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
8208 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
8209
8210 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
8211 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
8212 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
8213 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
8214 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
8215 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
8216 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
8217     Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
8218 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
8219 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
8220 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
8221 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8222# ifndef IN_NEM_DARWIN
8223 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
8224 {
8225 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
8226 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
8227 }
8228
8229 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
8230# endif
8231#endif
8232
8233 return VERR_VMX_INVALID_GUEST_STATE;
8234}
8235
8236/**
8237 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
8238 */
8239HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8240{
8241 /*
8242 * Cumulative notes of all recognized but unexpected VM-exits.
8243 *
8244 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
8245 * nested-paging is used.
8246 *
8247      * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
8248 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
8249 * this function (and thereby stop VM execution) for handling such instructions.
8250 *
8251 *
8252 * VMX_EXIT_INIT_SIGNAL:
8253 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
8254 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these
8255      * They are -NOT- blocked in VMX non-root operation, so we can, in theory, still get
8256      * these VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
8257      *
8258      * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
8259      * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
8260      * See Intel spec. 23.8 "Restrictions on VMX Operation".
8261 * VMX_EXIT_SIPI:
8262 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
8263 * activity state is used. We don't make use of it as our guests don't have direct
8264 * access to the host local APIC.
8265 *
8266 * See Intel spec. 25.3 "Other Causes of VM-exits".
8267 *
8268 * VMX_EXIT_IO_SMI:
8269 * VMX_EXIT_SMI:
8270 * This can only happen if we support dual-monitor treatment of SMI, which can be
8271 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
8272 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
8273 * VMX root mode or receive an SMI. If we get here, something funny is going on.
8274 *
8275 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
8276 * See Intel spec. 25.3 "Other Causes of VM-Exits"
8277 *
8278 * VMX_EXIT_ERR_MSR_LOAD:
8279      * Failures while loading MSRs that are part of the VM-entry MSR-load area are
8280      * unexpected and typically indicate a bug in the hypervisor code. We thus cannot
8281      * resume execution.
8282 *
8283 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
8284 *
8285 * VMX_EXIT_ERR_MACHINE_CHECK:
8286      * Machine-check exceptions indicate a fatal/unrecoverable hardware condition
8287      * including but not limited to system bus, ECC, parity, cache and TLB errors. An
8288      * abort-class exception (#MC) is raised. We thus cannot assume a
8289      * reasonable chance of continuing any sort of execution and we bail.
8290 *
8291 * See Intel spec. 15.1 "Machine-check Architecture".
8292 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
8293 *
8294 * VMX_EXIT_PML_FULL:
8295 * VMX_EXIT_VIRTUALIZED_EOI:
8296 * VMX_EXIT_APIC_WRITE:
8297 * We do not currently support any of these features and thus they are all unexpected
8298 * VM-exits.
8299 *
8300 * VMX_EXIT_GDTR_IDTR_ACCESS:
8301 * VMX_EXIT_LDTR_TR_ACCESS:
8302 * VMX_EXIT_RDRAND:
8303 * VMX_EXIT_RSM:
8304 * VMX_EXIT_VMFUNC:
8305 * VMX_EXIT_ENCLS:
8306 * VMX_EXIT_RDSEED:
8307 * VMX_EXIT_XSAVES:
8308 * VMX_EXIT_XRSTORS:
8309 * VMX_EXIT_UMWAIT:
8310 * VMX_EXIT_TPAUSE:
8311 * VMX_EXIT_LOADIWKEY:
8312 * These VM-exits are -not- caused unconditionally by execution of the corresponding
8313      * instruction. Any VM-exit for these instructions indicates a hardware problem,
8314 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
8315 *
8316 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
8317 */
8318 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8319 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
8320 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8321}
8322
8323
8324/**
8325 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
8326 */
8327HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8328{
8329 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8330
8331 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8332
8333 /** @todo Optimize this: We currently drag in the whole MSR state
8334 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8335 * MSRs required. That would require changes to IEM and possibly CPUM too.
8336      * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
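    /* A sketch (not compiled) of the narrower import this @todo is after: an extra
       case in the switch below that, for e.g. a MSR_K8_SF_MASK read, would pull in
       only the syscall MSRs instead of CPUMCTX_EXTRN_ALL_MSRS.  The per-MSR mask
       selection is hypothetical and would need IEM/CPUM support: */
#if 0
        case MSR_K8_SF_MASK:
            rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
                                     | CPUMCTX_EXTRN_SYSCALL_MSRS>(pVCpu, pVmcsInfo, __FUNCTION__);
            AssertRCReturn(rc, rc);
            break;
#endif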
8337 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8338 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8339 int rc;
8340 switch (idMsr)
8341 {
8342 default:
8343 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8344 __FUNCTION__);
8345 AssertRCReturn(rc, rc);
8346 break;
8347 case MSR_K8_FS_BASE:
8348 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8349 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8350 AssertRCReturn(rc, rc);
8351 break;
8352 case MSR_K8_GS_BASE:
8353 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8354 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8355 AssertRCReturn(rc, rc);
8356 break;
8357 }
8358
8359 Log4Func(("ecx=%#RX32\n", idMsr));
8360
8361#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8362 Assert(!pVmxTransient->fIsNestedGuest);
8363 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
8364 {
8365 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
8366 && idMsr != MSR_K6_EFER)
8367 {
8368 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
8369 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8370 }
8371 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8372 {
8373 Assert(pVmcsInfo->pvMsrBitmap);
8374 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8375 if (fMsrpm & VMXMSRPM_ALLOW_RD)
8376 {
8377 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
8378 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8379 }
8380 }
8381 }
8382#endif
8383
8384 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
8385 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
8386 if (rcStrict == VINF_SUCCESS)
8387 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8388 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8389 {
8390 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8391 rcStrict = VINF_SUCCESS;
8392 }
8393 else
8394 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
8395 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8396
8397 return rcStrict;
8398}
8399
8400
8401/**
8402 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
8403 */
8404HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8405{
8406 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8407
8408 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8409
8410 /*
8411 * The FS and GS base MSRs are not part of the above all-MSRs mask.
8412      * Although we don't need to fetch the base as it will be overwritten shortly, when
8413      * loading the guest state we would also load the entire segment register, including
8414      * the limit and attributes, and thus we need to load them here.
8415 */
8416 /** @todo Optimize this: We currently drag in the whole MSR state
8417 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8418 * MSRs required. That would require changes to IEM and possibly CPUM too.
8419      * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8420 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8421 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8422 int rc;
8423 switch (idMsr)
8424 {
8425 default:
8426 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8427 __FUNCTION__);
8428 AssertRCReturn(rc, rc);
8429 break;
8430
8431 case MSR_K8_FS_BASE:
8432 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8433 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8434 AssertRCReturn(rc, rc);
8435 break;
8436 case MSR_K8_GS_BASE:
8437 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8438 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8439 AssertRCReturn(rc, rc);
8440 break;
8441 }
8442 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
8443
8444 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
8445 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
8446
8447 if (rcStrict == VINF_SUCCESS)
8448 {
8449 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8450
8451 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
8452 if ( idMsr == MSR_IA32_APICBASE
8453 || ( idMsr >= MSR_IA32_X2APIC_START
8454 && idMsr <= MSR_IA32_X2APIC_END))
8455 {
8456 /*
8457 * We've already saved the APIC related guest-state (TPR) in post-run phase.
8458 * When full APIC register virtualization is implemented we'll have to make
8459 * sure APIC state is saved from the VMCS before IEM changes it.
8460 */
8461 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
8462 }
8463 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
8464 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8465 else if (idMsr == MSR_K6_EFER)
8466 {
8467 /*
8468 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
8469 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
8470 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
8471 */
8472 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
8473 }
8474
8475 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
8476 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
8477 {
8478 switch (idMsr)
8479 {
8480 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
8481 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
8482 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
8483 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
8484 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
8485 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
8486 default:
8487 {
8488#ifndef IN_NEM_DARWIN
8489 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8490 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
8491 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8492 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
8493#else
8494 AssertMsgFailed(("TODO\n"));
8495#endif
8496 break;
8497 }
8498 }
8499 }
8500#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8501 else
8502 {
8503 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
8504 switch (idMsr)
8505 {
8506 case MSR_IA32_SYSENTER_CS:
8507 case MSR_IA32_SYSENTER_EIP:
8508 case MSR_IA32_SYSENTER_ESP:
8509 case MSR_K8_FS_BASE:
8510 case MSR_K8_GS_BASE:
8511 {
8512 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
8513 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8514 }
8515
8516            /* Writes to MSRs in the auto-load/store area or to lazily swapped MSRs shouldn't cause VM-exits when MSR-bitmaps are used. */
8517 default:
8518 {
8519 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8520 {
8521 /* EFER MSR writes are always intercepted. */
8522 if (idMsr != MSR_K6_EFER)
8523 {
8524 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
8525 idMsr));
8526 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8527 }
8528 }
8529
8530 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8531 {
8532 Assert(pVmcsInfo->pvMsrBitmap);
8533 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8534 if (fMsrpm & VMXMSRPM_ALLOW_WR)
8535 {
8536 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
8537 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8538 }
8539 }
8540 break;
8541 }
8542 }
8543 }
8544#endif /* VBOX_STRICT */
8545 }
8546 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8547 {
8548 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8549 rcStrict = VINF_SUCCESS;
8550 }
8551 else
8552 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
8553 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8554
8555 return rcStrict;
8556}
8557
8558
8559/**
8560 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8561 */
8562HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8563{
8564 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8565
8566 /** @todo The guest has likely hit a contended spinlock. We might want to
8567     *        poke/schedule a different guest VCPU. */
8568 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8569 if (RT_SUCCESS(rc))
8570 return VINF_EM_RAW_INTERRUPT;
8571
8572 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8573 return rc;
8574}
8575
8576
8577/**
8578 * VM-exit handler for when the TPR value is lowered below the specified
8579 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8580 */
8581HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8582{
8583 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8584 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8585
8586 /*
8587 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8588 * We'll re-evaluate pending interrupts and inject them before the next VM
8589 * entry so we can just continue execution here.
8590 */
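    /* (The CPU raises this VM-exit when bits 7:4 of the TPR shadow drop below the
       TPR-threshold VMCS field, meaning a previously masked interrupt may now be
       deliverable.) */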
8591 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8592 return VINF_SUCCESS;
8593}
8594
8595
8596/**
8597 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8598 * VM-exit.
8599 *
8600 * @retval VINF_SUCCESS when guest execution can continue.
8601 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8602 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8603 * incompatible guest state for VMX execution (real-on-v86 case).
8604 */
8605HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8606{
8607 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8608 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8609
8610 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8611 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8612 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8613
8614 VBOXSTRICTRC rcStrict;
8615 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8616 uint64_t const uExitQual = pVmxTransient->uExitQual;
8617 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
8618 switch (uAccessType)
8619 {
8620 /*
8621 * MOV to CRx.
8622 */
8623 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8624 {
8625 /*
8626             * When PAE paging is used, the CPU reloads the PAE PDPTEs from CR3 when the guest
8627             * changes certain bits in CR0 or CR4 (and not just CR3). We are currently fine
8628             * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3, which will import
8629             * the PAE PDPTEs as well.
8630 */
8631 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8632 AssertRCReturn(rc, rc);
8633
8634 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8635#ifndef IN_NEM_DARWIN
8636 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8637#endif
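            /* Exit qualification for CR accesses: bits 3:0 give the CR number, bits 5:4
               the access type (0=MOV to CR, 1=MOV from CR, 2=CLTS, 3=LMSW) and bits 11:8
               the GPR involved. See Intel spec. "Exit Qualification for Control-Register
               Accesses". */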
8638 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8639 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8640
8641 /*
8642             * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8643             *   - Nested paging isn't used.
8644             *   - The guest doesn't have paging enabled (we intercept CR3 to update the shadow page tables).
8645             *   - We are executing in the VM debug loop.
8646 */
8647#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8648# ifndef IN_NEM_DARWIN
8649 Assert( iCrReg != 3
8650 || !VM_IS_VMX_NESTED_PAGING(pVM)
8651 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8652 || pVCpu->hmr0.s.fUsingDebugLoop);
8653# else
8654 Assert( iCrReg != 3
8655 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8656# endif
8657#endif
8658
8659            /* MOV to CR8 only causes a VM-exit when the TPR shadow is not used. */
8660 Assert( iCrReg != 8
8661 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8662
8663 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8664 AssertMsg( rcStrict == VINF_SUCCESS
8665 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8666
8667#ifndef IN_NEM_DARWIN
8668 /*
8669 * This is a kludge for handling switches back to real mode when we try to use
8670 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8671 * deal with special selector values, so we have to return to ring-3 and run
8672 * there till the selector values are V86 mode compatible.
8673 *
8674 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8675 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8676 * this function.
8677 */
8678 if ( iCrReg == 0
8679 && rcStrict == VINF_SUCCESS
8680 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8681 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8682 && (uOldCr0 & X86_CR0_PE)
8683 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8684 {
8685 /** @todo Check selectors rather than returning all the time. */
8686 Assert(!pVmxTransient->fIsNestedGuest);
8687 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8688 rcStrict = VINF_EM_RESCHEDULE_REM;
8689 }
8690#endif
8691
8692 break;
8693 }
8694
8695 /*
8696 * MOV from CRx.
8697 */
8698 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8699 {
8700 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8701 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8702
8703 /*
8704             * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8705             *   - Nested paging isn't used.
8706             *   - The guest doesn't have paging enabled (we pass the guest's CR3 rather than our identity-mapped CR3).
8707             *   - We are executing in the VM debug loop.
8708 */
8709#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8710# ifndef IN_NEM_DARWIN
8711 Assert( iCrReg != 3
8712 || !VM_IS_VMX_NESTED_PAGING(pVM)
8713 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8714 || pVCpu->hmr0.s.fLeaveDone);
8715# else
8716 Assert( iCrReg != 3
8717 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8718# endif
8719#endif
8720
8721            /* MOV from CR8 only causes a VM-exit when the TPR shadow feature isn't enabled. */
8722 Assert( iCrReg != 8
8723 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8724
8725 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8726 break;
8727 }
8728
8729 /*
8730 * CLTS (Clear Task-Switch Flag in CR0).
8731 */
8732 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8733 {
8734 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8735 break;
8736 }
8737
8738 /*
8739 * LMSW (Load Machine-Status Word into CR0).
8740 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8741 */
8742 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8743 {
8744 RTGCPTR GCPtrEffDst;
8745 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
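            /* Bits 31:16 of the exit qualification hold the LMSW source data; LMSW can
               only modify the low four CR0 bits (PE, MP, EM, TS) and, as noted above,
               cannot clear PE. */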
8746 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8747 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8748 if (fMemOperand)
8749 {
8750 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
8751 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8752 }
8753 else
8754 GCPtrEffDst = NIL_RTGCPTR;
8755 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8756 break;
8757 }
8758
8759 default:
8760 {
8761 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8762 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8763 }
8764 }
8765
8766 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8767 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8768 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8769
8770 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8771 NOREF(pVM);
8772 return rcStrict;
8773}
8774
8775
8776/**
8777 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8778 * VM-exit.
8779 */
8780HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8781{
8782 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8783 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8784
8785 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8786 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8787 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8788 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8789#define VMX_HC_EXIT_IO_INSTR_INITIAL_REGS (IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER)
8790 /* EFER MSR also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8791 int rc = vmxHCImportGuestState<VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8792 AssertRCReturn(rc, rc);
8793
8794    /* See Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
8795 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8796 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8797 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8798 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8799 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8800 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8801 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
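    /* The size-of-access field encodes 0 for 1-byte, 1 for 2-byte and 3 for 4-byte
       accesses; the value 2 is undefined, hence the assertion above and the zero
       placeholders at index 2 in the lookup tables below. */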
8802
8803 /*
8804 * Update exit history to see if this exit can be optimized.
8805 */
8806 VBOXSTRICTRC rcStrict;
8807 PCEMEXITREC pExitRec = NULL;
8808 if ( !fGstStepping
8809 && !fDbgStepping)
8810 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8811 !fIOString
8812 ? !fIOWrite
8813 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8814 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8815 : !fIOWrite
8816 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8817 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8818 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8819 if (!pExitRec)
8820 {
8821 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8822 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
8823
8824 uint32_t const cbValue = s_aIOSizes[uIOSize];
8825 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8826 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8827 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8828 if (fIOString)
8829 {
8830 /*
8831 * INS/OUTS - I/O String instruction.
8832 *
8833 * Use instruction-information if available, otherwise fall back on
8834 * interpreting the instruction.
8835 */
8836 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8837 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8838 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
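        /* Bit 54 of IA32_VMX_BASIC indicates whether instruction-information is provided
           on VM-exits due to INS/OUTS; on older CPUs without it we must interpret the
           instruction ourselves. */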
8839 if (fInsOutsInfo)
8840 {
8841 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
8842 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8843 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8844 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8845 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8846 if (fIOWrite)
8847 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8848 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8849 else
8850 {
8851 /*
8852 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8853                 * Hence the "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8854 * See Intel Instruction spec. for "INS".
8855 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8856 */
8857 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8858 }
8859 }
8860 else
8861 rcStrict = IEMExecOne(pVCpu);
8862
8863 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8864 fUpdateRipAlready = true;
8865 }
8866 else
8867 {
8868 /*
8869 * IN/OUT - I/O instruction.
8870 */
8871 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8872 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8873 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8874 if (fIOWrite)
8875 {
8876 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8877 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8878#ifndef IN_NEM_DARWIN
8879 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8880 && !pCtx->eflags.Bits.u1TF)
8881 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8882#endif
8883 }
8884 else
8885 {
8886 uint32_t u32Result = 0;
8887 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8888 if (IOM_SUCCESS(rcStrict))
8889 {
8890 /* Save result of I/O IN instr. in AL/AX/EAX. */
8891 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8892 }
8893#ifndef IN_NEM_DARWIN
8894 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8895 && !pCtx->eflags.Bits.u1TF)
8896 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8897#endif
8898 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8899 }
8900 }
8901
8902 if (IOM_SUCCESS(rcStrict))
8903 {
8904 if (!fUpdateRipAlready)
8905 {
8906 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8907 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8908 }
8909
8910 /*
8911             * INS/OUTS with a REP prefix updates RFLAGS; missing this can be observed as a
8912             * triple-fault guru meditation while booting a Fedora 17 64-bit guest.
8913 *
8914 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8915 */
8916 if (fIOString)
8917 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8918
8919 /*
8920 * If any I/O breakpoints are armed, we need to check if one triggered
8921 * and take appropriate action.
8922 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8923 */
8924#if 1
8925 AssertCompile(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7);
8926#else
8927 AssertCompile(!(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7));
8928 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo);
8929 AssertRCReturn(rc, rc);
8930#endif
8931
8932 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8933 * execution engines about whether hyper BPs and such are pending. */
8934 uint32_t const uDr7 = pCtx->dr[7];
8935 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8936 && X86_DR7_ANY_RW_IO(uDr7)
8937 && (pCtx->cr4 & X86_CR4_DE))
8938 || DBGFBpIsHwIoArmed(pVM)))
8939 {
8940 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8941
8942#ifndef IN_NEM_DARWIN
8943 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8944 VMMRZCallRing3Disable(pVCpu);
8945 HM_DISABLE_PREEMPT(pVCpu);
8946
8947 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8948
8949 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8950 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8951 {
8952 /* Raise #DB. */
8953 if (fIsGuestDbgActive)
8954 ASMSetDR6(pCtx->dr[6]);
8955 if (pCtx->dr[7] != uDr7)
8956 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8957
8958 vmxHCSetPendingXcptDB(pVCpu);
8959 }
8960 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8961 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8962 else if ( rcStrict2 != VINF_SUCCESS
8963 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8964 rcStrict = rcStrict2;
8965 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8966
8967 HM_RESTORE_PREEMPT();
8968 VMMRZCallRing3Enable(pVCpu);
8969#else
8970 /** @todo */
8971#endif
8972 }
8973 }
8974
8975#ifdef VBOX_STRICT
8976 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8977 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8978 Assert(!fIOWrite);
8979 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8980 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8981 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8982 Assert(fIOWrite);
8983 else
8984 {
8985# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8986 * statuses, that the VMM device and some others may return. See
8987 * IOM_SUCCESS() for guidance. */
8988 AssertMsg( RT_FAILURE(rcStrict)
8989 || rcStrict == VINF_SUCCESS
8990 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8991 || rcStrict == VINF_EM_DBG_BREAKPOINT
8992 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8993 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8994# endif
8995 }
8996#endif
8997 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8998 }
8999 else
9000 {
9001 /*
9002 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
9003 */
9004 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
9005 VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
9006 AssertRCReturn(rc2, rc2);
9007 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
9008 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
9009 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
9010 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9011 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
9012 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
9013
9014 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9015 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9016
9017 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9018 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9019 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9020 }
9021 return rcStrict;
9022}
9023
9024
9025/**
9026 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
9027 * VM-exit.
9028 */
9029HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9030{
9031 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9032
9033    /* Check if this task-switch occurred while delivering an event through the guest IDT. */
9034 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9035 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
9036 {
9037 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_INFO>(pVCpu, pVmxTransient);
9038 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9039 {
9040 uint32_t uErrCode;
9041 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
9042 {
9043 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9044 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
9045 }
9046 else
9047 uErrCode = 0;
9048
9049 RTGCUINTPTR GCPtrFaultAddress;
9050 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
9051 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
9052 else
9053 GCPtrFaultAddress = 0;
9054
9055 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9056
9057 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
9058 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
9059
9060 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
9061 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
9062 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9063 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9064 }
9065 }
9066
9067 /* Fall back to the interpreter to emulate the task-switch. */
9068 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9069 return VERR_EM_INTERPRETER;
9070}
9071
9072
9073/**
9074 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
9075 */
9076HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9077{
9078 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9079
9080 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
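    /* MTF causes a VM-exit after every guest instruction while set; clear it here so the
       guest doesn't keep exiting, then report the completed single step to the debugger. */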
9081 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
9082 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9083 AssertRC(rc);
9084 return VINF_EM_DBG_STEPPED;
9085}
9086
9087
9088/**
9089 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
9090 */
9091HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9092{
9093 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9094 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
9095
9096 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9097 | HMVMX_READ_EXIT_INSTR_LEN
9098 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9099 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9100 | HMVMX_READ_IDT_VECTORING_INFO
9101 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9102
9103 /*
9104 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9105 */
9106 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9107 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9108 {
9109        /* For some crazy guests, event delivery can cause an APIC-access VM-exit; in that case go to instruction emulation. */
9110 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
9111 {
9112 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9113 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9114 }
9115 }
9116 else
9117 {
9118 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9119 return rcStrict;
9120 }
9121
9122    /* IOMR0MmioPhysHandler() below may call into IEM, so save the necessary state. */
9123 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9124 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9125 AssertRCReturn(rc, rc);
9126
9127    /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses". */
9128 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
9129 switch (uAccessType)
9130 {
9131#ifndef IN_NEM_DARWIN
9132 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
9133 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
9134 {
9135 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
9136 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
9137 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
9138
9139 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
9140 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
9141 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
9142 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
9143 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
9144
9145 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
9146 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
9147 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9148 if ( rcStrict == VINF_SUCCESS
9149 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9150 || rcStrict == VERR_PAGE_NOT_PRESENT)
9151 {
9152 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9153 | HM_CHANGED_GUEST_APIC_TPR);
9154 rcStrict = VINF_SUCCESS;
9155 }
9156 break;
9157 }
9158#else
9159 /** @todo */
9160#endif
9161
9162 default:
9163 {
9164 Log4Func(("uAccessType=%#x\n", uAccessType));
9165 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
9166 break;
9167 }
9168 }
9169
9170 if (rcStrict != VINF_SUCCESS)
9171 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
9172 return rcStrict;
9173}
9174
9175
9176/**
9177 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
9178 * VM-exit.
9179 */
9180HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9181{
9182 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9183 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9184
9185 /*
9186 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
9187 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
9188 * must emulate the MOV DRx access.
9189 */
9190 if (!pVmxTransient->fIsNestedGuest)
9191 {
9192 /* We should -not- get this VM-exit if the guest's debug registers were active. */
9193 if ( pVmxTransient->fWasGuestDebugStateActive
9194#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9195 && !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx
9196#endif
9197 )
9198 {
9199 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
9200 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
9201 }
9202
9203 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
9204 && !pVmxTransient->fWasHyperDebugStateActive)
9205 {
9206 Assert(!DBGFIsStepping(pVCpu));
9207 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
9208
9209 /* Whether we disable intercepting MOV DRx instructions and resume
9210 the current one, or emulate it and keep intercepting them is
9211 configurable. Though it usually comes down to whether there are
9212 any new DR6 & DR7 bits (RTM) we want to hide from the guest. */
9213#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9214 bool const fResumeInstruction = !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx;
9215#else
9216 bool const fResumeInstruction = true;
9217#endif
9218 if (fResumeInstruction)
9219 {
9220 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
9221 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9222 AssertRC(rc);
9223 }
9224
9225#ifndef IN_NEM_DARWIN
9226 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
9227 VMMRZCallRing3Disable(pVCpu);
9228 HM_DISABLE_PREEMPT(pVCpu);
9229
9230 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
9231 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
9232 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9233
9234 HM_RESTORE_PREEMPT();
9235 VMMRZCallRing3Enable(pVCpu);
9236#else
9237 CPUMR3NemActivateGuestDebugState(pVCpu);
9238 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9239 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
9240#endif
9241
9242 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
9243 if (fResumeInstruction)
9244 {
9245#ifdef VBOX_WITH_STATISTICS
9246 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9247 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9248 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9249 else
9250 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9251#endif
9252 return VINF_SUCCESS;
9253 }
9254 }
9255 }
9256
9257 /*
9258 * Import state. We must have DR7 loaded here as it's always consulted,
9259 * both for reading and writing. The other debug registers are never
9260 * exported as such.
9261 */
9262 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9263 int rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
9264 | CPUMCTX_EXTRN_GPRS_MASK
9265 | CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo, __FUNCTION__);
9266 AssertRCReturn(rc, rc);
9267
9268 uint8_t const iGReg = VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual);
9269 uint8_t const iDrReg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
9270 Log4Func(("cs:rip=%#04x:%08RX64 r%d %s dr%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iGReg,
9271 VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE ? "->" : "<-", iDrReg));
9272
9273 VBOXSTRICTRC rcStrict;
9274 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9275 {
9276 /*
9277 * Write DRx register.
9278 */
9279 rcStrict = IEMExecDecodedMovDRxWrite(pVCpu, pVmxTransient->cbExitInstr, iDrReg, iGReg);
9280 AssertMsg( rcStrict == VINF_SUCCESS
9281 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9282
9283 if (rcStrict == VINF_SUCCESS)
9284 {
9285 /** @todo r=bird: Not sure why we always flag DR7 as modified here, but I've
9286 * kept it for now to avoid breaking something non-obvious. */
9287 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9288 | HM_CHANGED_GUEST_DR7);
9289 /* Update the DR6 register if guest debug state is active, otherwise we'll
9290 trash it when calling CPUMR0DebugStateMaybeSaveGuestAndRestoreHost. */
9291 if (iDrReg == 6 && CPUMIsGuestDebugStateActive(pVCpu))
9292 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
9293 Log4Func(("r%d=%#RX64 => dr%d=%#RX64\n", iGReg, pVCpu->cpum.GstCtx.aGRegs[iGReg].u,
9294 iDrReg, pVCpu->cpum.GstCtx.dr[iDrReg]));
9295 }
9296 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9297 {
9298 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9299 rcStrict = VINF_SUCCESS;
9300 }
9301
9302 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9303 }
9304 else
9305 {
9306 /*
9307 * Read DRx register into a general purpose register.
9308 */
9309 rcStrict = IEMExecDecodedMovDRxRead(pVCpu, pVmxTransient->cbExitInstr, iGReg, iDrReg);
9310 AssertMsg( rcStrict == VINF_SUCCESS
9311 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9312
9313 if (rcStrict == VINF_SUCCESS)
9314 {
9315 if (iGReg == X86_GREG_xSP)
9316 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9317 | HM_CHANGED_GUEST_RSP);
9318 else
9319 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9320 }
9321 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9322 {
9323 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9324 rcStrict = VINF_SUCCESS;
9325 }
9326
9327 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9328 }
9329
9330 return rcStrict;
9331}
9332
9333
9334/**
9335 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
9336 * Conditional VM-exit.
9337 */
9338HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9339{
9340 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9341
9342#ifndef IN_NEM_DARWIN
9343 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9344
9345 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_LEN
9346 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9347 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9348 | HMVMX_READ_IDT_VECTORING_INFO
9349 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9350 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9351
9352 /*
9353 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9354 */
9355 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9356 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9357 {
9358 /*
9359 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
9360 * instruction emulation to inject the original event. Otherwise, injecting the original event
9361 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
9362 */
9363 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9364 { /* likely */ }
9365 else
9366 {
9367 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9368# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9369 /** @todo NSTVMX: Think about how this should be handled. */
9370 if (pVmxTransient->fIsNestedGuest)
9371 return VERR_VMX_IPE_3;
9372# endif
9373 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9374 }
9375 }
9376 else
9377 {
9378 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9379 return rcStrict;
9380 }
9381
9382 /*
9383 * Get sufficient state and update the exit history entry.
9384 */
9385 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9386 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9387 AssertRCReturn(rc, rc);
9388
9389 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9390 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
9391 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
9392 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
9393 if (!pExitRec)
9394 {
9395 /*
9396 * If we succeed, resume guest execution.
9397         * If we fail to interpret the instruction because we couldn't get the guest-physical address
9398         * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
9399         * in the host TLB), we resume execution, which causes a guest page fault that lets the guest handle this
9400 * weird case. See @bugref{6043}.
9401 */
9402 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9403/** @todo bird: We can probably just go straight to IOM here and assume that
9404 * it's MMIO, then fall back on PGM if that hunch didn't work out so
9405 * well. However, we need to address that aliasing workarounds that
9406 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
9407 *
9408 * Might also be interesting to see if we can get this done more or
9409 * less locklessly inside IOM. Need to consider the lookup table
9410 * updating and use a bit more carefully first (or do all updates via
9411 * rendezvous) */
9412 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, &pVCpu->cpum.GstCtx, GCPhys, UINT32_MAX);
9413 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
9414 if ( rcStrict == VINF_SUCCESS
9415 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9416 || rcStrict == VERR_PAGE_NOT_PRESENT)
9417 {
9418 /* Successfully handled MMIO operation. */
9419 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9420 | HM_CHANGED_GUEST_APIC_TPR);
9421 rcStrict = VINF_SUCCESS;
9422 }
9423 }
9424 else
9425 {
9426 /*
9427 * Frequent exit or something needing probing. Call EMHistoryExec.
9428 */
9429        Log4(("EptMiscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
9430 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
9431
9432 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9433 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9434
9435        Log4(("EptMiscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9436 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9437 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9438 }
9439 return rcStrict;
9440#else
9441 AssertFailed();
9442 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
9443#endif
9444}
9445
9446
9447/**
9448 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
9449 * VM-exit.
9450 */
9451HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9452{
9453 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9454#ifndef IN_NEM_DARWIN
9455 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9456
9457 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9458 | HMVMX_READ_EXIT_INSTR_LEN
9459 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9460 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9461 | HMVMX_READ_IDT_VECTORING_INFO
9462 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9463 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9464
9465 /*
9466 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9467 */
9468 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9469 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9470 {
9471 /*
9472 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
9473 * we shall resolve the nested #PF and re-inject the original event.
9474 */
9475 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9476 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
9477 }
9478 else
9479 {
9480 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9481 return rcStrict;
9482 }
9483
9484 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9485 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9486 AssertRCReturn(rc, rc);
9487
9488 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9489 uint64_t const uExitQual = pVmxTransient->uExitQual;
9490 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
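    /* Bits 8:7 of the exit qualification: bit 7 set means the guest linear-address field
       is valid; bit 8 is only defined when bit 7 is set (final access vs. access during
       the translation itself). Hence the value 10b checked by the assertion above cannot
       occur. */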
9491
9492 RTGCUINT uErrorCode = 0;
9493 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
9494 uErrorCode |= X86_TRAP_PF_ID;
9495 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9496 uErrorCode |= X86_TRAP_PF_RW;
9497 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
9498 uErrorCode |= X86_TRAP_PF_P;
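    /* The mapping above: instruction fetch -> PF_ID, write access -> PF_RW; any of the EPT
       read/write/execute permission bits being set means a translation existed, i.e. this
       is a protection violation rather than a not-present page, hence PF_P. */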
9499
9500 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9501 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
9502
9503 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9504
9505 /*
9506 * Handle the pagefault trap for the nested shadow table.
9507 */
9508 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
9509 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, pCtx, GCPhys);
9510 TRPMResetTrap(pVCpu);
9511
9512 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
9513 if ( rcStrict == VINF_SUCCESS
9514 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9515 || rcStrict == VERR_PAGE_NOT_PRESENT)
9516 {
9517 /* Successfully synced our nested page tables. */
9518 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
9519 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
9520 return VINF_SUCCESS;
9521 }
9522 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9523 return rcStrict;
9524
9525#else /* IN_NEM_DARWIN */
9526 PVM pVM = pVCpu->CTX_SUFF(pVM);
9527 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
9528 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9529 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9530 vmxHCImportGuestRip(pVCpu);
9531 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
9532
9533 /*
9534 * Ask PGM for information about the given GCPhys. We need to check if we're
9535 * out of sync first.
9536 */
9537 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE),
9538 false,
9539 false };
9540 PGMPHYSNEMPAGEINFO Info;
9541 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
9542 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
9543 if (RT_SUCCESS(rc))
9544 {
9545 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9546 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
9547 {
9548 if (State.fCanResume)
9549 {
9550 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
9551 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9552 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9553 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9554 State.fDidSomething ? "" : " no-change"));
9555 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
9556 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9557 return VINF_SUCCESS;
9558 }
9559 }
9560
9561 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
9562 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9563 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9564 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9565 State.fDidSomething ? "" : " no-change"));
9566 }
9567 else
9568 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
9569 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9570 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
9571
9572 /*
9573 * Emulate the memory access, either access handler or special memory.
9574 */
9575 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
9576 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9577 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
9578 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
9579 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9580
9581 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9582 AssertRCReturn(rc, rc);
9583
9584 VBOXSTRICTRC rcStrict;
9585 if (!pExitRec)
9586 rcStrict = IEMExecOne(pVCpu);
9587 else
9588 {
9589 /* Frequent access or probing. */
9590 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9591 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9592 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9593 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9594 }
9595
9596 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9597
9598 Log4Func(("EPT return rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9599 return rcStrict;
9600#endif /* IN_NEM_DARWIN */
9601}
9602
9603#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9604
9605/**
9606 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
9607 */
9608HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9609{
9610 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9611
9612 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9613 | HMVMX_READ_EXIT_INSTR_INFO
9614 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9615 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9616 | CPUMCTX_EXTRN_SREG_MASK
9617 | CPUMCTX_EXTRN_HWVIRT
9618 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9619 AssertRCReturn(rc, rc);
9620
9621 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9622
9623 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9624 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9625
9626 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
9627 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9628 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9629 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9630 {
9631 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9632 rcStrict = VINF_SUCCESS;
9633 }
9634 return rcStrict;
9635}
9636
9637
9638/**
9639 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9640 */
9641HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9642{
9643 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9644
9645 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
9646 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9647 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9648 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9649 AssertRCReturn(rc, rc);
9650
9651 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9652
9653 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9654 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9655 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9656 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9657 {
9658 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9659 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9660 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9661 }
9662 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9663 return rcStrict;
9664}
9665
9666
9667/**
9668 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9669 */
9670HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9671{
9672 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9673
9674 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9675 | HMVMX_READ_EXIT_INSTR_INFO
9676 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9677 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9678 | CPUMCTX_EXTRN_SREG_MASK
9679 | CPUMCTX_EXTRN_HWVIRT
9680 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9681 AssertRCReturn(rc, rc);
9682
9683 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9684
9685 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9686 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9687
9688 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9689 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9690 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9691 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9692 {
9693 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9694 rcStrict = VINF_SUCCESS;
9695 }
9696 return rcStrict;
9697}
9698
9699
9700/**
9701 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9702 */
9703HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9704{
9705 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9706
9707 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9708 | HMVMX_READ_EXIT_INSTR_INFO
9709 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9710 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9711 | CPUMCTX_EXTRN_SREG_MASK
9712 | CPUMCTX_EXTRN_HWVIRT
9713 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9714 AssertRCReturn(rc, rc);
9715
9716 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9717
9718 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9719 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9720
9721 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9722 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9723 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9724 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9725 {
9726 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9727 rcStrict = VINF_SUCCESS;
9728 }
9729 return rcStrict;
9730}
9731
9732
9733/**
9734 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9735 */
9736HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9737{
9738 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9739
9740 /*
9741     * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9742     * thus might not need to import the shadow VMCS state. However, it's safer to do so
9743     * in case code elsewhere dares to look at unsynced VMCS fields.
9744 */
9745 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9746 | HMVMX_READ_EXIT_INSTR_INFO
9747 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9748 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9749 | CPUMCTX_EXTRN_SREG_MASK
9750 | CPUMCTX_EXTRN_HWVIRT
9751 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9752 AssertRCReturn(rc, rc);
9753
9754 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9755
9756 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9757 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9758 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9759
9760 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9761 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9762 {
9763 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9764
9765# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
9766 /* Try for exit optimization. This is on the following instruction
9767 because it would be a waste of time to have to reinterpret the
9768       already decoded VMREAD instruction. */
9769 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
9770 if (pExitRec)
9771 {
9772 /* Frequent access or probing. */
9773 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9774 AssertRCReturn(rc, rc);
9775
9776 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9777 Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9778 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9779 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9780 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9781 }
9782# endif
9783 }
9784 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9785 {
9786 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9787 rcStrict = VINF_SUCCESS;
9788 }
9789 return rcStrict;
9790}
9791
9792
9793/**
9794 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9795 */
9796HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9797{
9798 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9799
9800 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
9801 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9802 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9803 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9804 AssertRCReturn(rc, rc);
9805
9806 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9807
9808 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9809 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9810 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9811 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9812 {
9813 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9814 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9815 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9816 }
9817 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9818 return rcStrict;
9819}
9820
9821
9822/**
9823 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9824 */
9825HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9826{
9827 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9828
9829 /*
9830     * Although we should not get VMWRITE VM-exits for shadow VMCS fields, our HM hook
9831     * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS,
9832     * and that flags re-loading the entire shadow VMCS, so we save the entire shadow VMCS here.
9833 */
9834 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9835 | HMVMX_READ_EXIT_INSTR_INFO
9836 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9837 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9838 | CPUMCTX_EXTRN_SREG_MASK
9839 | CPUMCTX_EXTRN_HWVIRT
9840 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9841 AssertRCReturn(rc, rc);
9842
9843 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9844
9845 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9846 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9847 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9848
9849 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9850 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9851 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9852 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9853 {
9854 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9855 rcStrict = VINF_SUCCESS;
9856 }
9857 return rcStrict;
9858}
9859
9860
9861/**
9862 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9863 */
9864HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9865{
9866 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9867
9868 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9869 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
9870 | CPUMCTX_EXTRN_HWVIRT
9871 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9872 AssertRCReturn(rc, rc);
9873
9874 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9875
9876 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9877 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9878 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9879 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9880 {
9881 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9882 rcStrict = VINF_SUCCESS;
9883 }
9884 return rcStrict;
9885}
9886
9887
9888/**
9889 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9890 */
9891HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9892{
9893 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9894
9895 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9896 | HMVMX_READ_EXIT_INSTR_INFO
9897 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9898 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9899 | CPUMCTX_EXTRN_SREG_MASK
9900 | CPUMCTX_EXTRN_HWVIRT
9901 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9902 AssertRCReturn(rc, rc);
9903
9904 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9905
9906 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9907 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9908
9909 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9910 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9911 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9912 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9913 {
9914 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9915 rcStrict = VINF_SUCCESS;
9916 }
9917 return rcStrict;
9918}
9919
9920
9921/**
9922 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9923 */
9924HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9925{
9926 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9927
9928 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9929 | HMVMX_READ_EXIT_INSTR_INFO
9930 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9931 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9932 | CPUMCTX_EXTRN_SREG_MASK
9933 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9934 AssertRCReturn(rc, rc);
9935
9936 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9937
9938 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9939 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9940
9941 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9942 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9943 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9944 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9945 {
9946 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9947 rcStrict = VINF_SUCCESS;
9948 }
9949 return rcStrict;
9950}
9951
9952
9953# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9954/**
9955 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9956 */
9957HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9958{
9959 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9960
9961 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9962 | HMVMX_READ_EXIT_INSTR_INFO
9963 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9964 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9965 | CPUMCTX_EXTRN_SREG_MASK
9966 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9967 AssertRCReturn(rc, rc);
9968
9969 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9970
9971 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9972 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9973
9974 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9975 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9976 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9977 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9978 {
9979 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9980 rcStrict = VINF_SUCCESS;
9981 }
9982 return rcStrict;
9983}
9984# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9985#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9986/** @} */
9987
9988
9989#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9990/** @name Nested-guest VM-exit handlers.
9991 * @{
9992 */
9993/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9994/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9995/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9996
9997/**
9998 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9999 * Conditional VM-exit.
10000 */
10001HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10002{
10003 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10004
10005 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
10006
10007 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
10008 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
10009 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
10010
10011 switch (uExitIntType)
10012 {
10013# ifndef IN_NEM_DARWIN
10014 /*
10015 * Physical NMIs:
10016         * We should not direct host physical NMIs to the nested-guest. Dispatch them to the host.
10017 */
10018 case VMX_EXIT_INT_INFO_TYPE_NMI:
10019 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
10020# endif
10021
10022 /*
10023 * Hardware exceptions,
10024 * Software exceptions,
10025 * Privileged software exceptions:
10026 * Figure out if the exception must be delivered to the guest or the nested-guest.
10027 */
10028 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
10029 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
10030 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
10031 {
10032 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10033 | HMVMX_READ_EXIT_INSTR_LEN
10034 | HMVMX_READ_IDT_VECTORING_INFO
10035 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10036
10037 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10038 if (CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo), pVmxTransient->uExitIntErrorCode))
10039 {
10040 /* Exit qualification is required for debug and page-fault exceptions. */
10041 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10042
10043 /*
10044 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
10045 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
10046 * length. However, if delivery of a software interrupt, software exception or privileged
10047 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
10048 */
10049 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10050 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT(pVmxTransient->uExitIntInfo,
10051 pVmxTransient->uExitIntErrorCode,
10052 pVmxTransient->uIdtVectoringInfo,
10053 pVmxTransient->uIdtVectoringErrorCode);
10054#ifdef DEBUG_ramshankar
10055 vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10056 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n",
10057 pVmxTransient->uExitIntInfo, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
10058 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
10059 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n",
10060 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
10061#endif
10062 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
10063 }
10064
10065 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
10066 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10067 return vmxHCExitXcpt(pVCpu, pVmxTransient);
10068 }
10069
10070 /*
10071 * Software interrupts:
10072 * VM-exits cannot be caused by software interrupts.
10073 *
10074 * External interrupts:
10075 * This should only happen when "acknowledge external interrupts on VM-exit"
10076 * control is set. However, we never set this when executing a guest or
10077 * nested-guest. For nested-guests it is emulated while injecting interrupts into
10078 * the guest.
10079 */
10080 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10081 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
10082 default:
10083 {
10084 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
10085 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
10086 }
10087 }
10088}
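/*
 * Illustrative sketch of the intercept decision CPUMIsGuestVmxXcptInterceptSet() makes above:
 * an exception is intercepted by the nested-hypervisor when its bit is set in the nested VMCS
 * exception bitmap, except that #PF is additionally filtered through the page-fault error-code
 * mask/match fields, which can invert the bitmap verdict (Intel SDM). This is a hypothetical
 * helper (assuming the usual VMXVVMCS field names) and is not used by the handler above.
 */
DECLINLINE(bool) vmxSketchIsXcptIntercepted(PCVMXVVMCS pVmcsNstGst, uint8_t uVector, uint32_t uErrCode)
{
    bool fIntercept = RT_BOOL(pVmcsNstGst->u32XcptBitmap & RT_BIT_32(uVector));
    if (uVector == X86_XCPT_PF)
    {
        /* #PF: the bitmap verdict is kept on a mask/match hit and inverted on a miss. */
        if ((uErrCode & pVmcsNstGst->u32XcptPFMask) != pVmcsNstGst->u32XcptPFMatch)
            fIntercept = !fIntercept;
    }
    return fIntercept;
}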
10089
10090
10091/**
10092 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
10093 * Unconditional VM-exit.
10094 */
10095HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10096{
10097 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10098 return IEMExecVmxVmexitTripleFault(pVCpu);
10099}
10100
10101
10102/**
10103 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
10104 */
10105HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10106{
10107 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10108
10109 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
10110 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10111 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
10112}
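/*
 * Note: the conditional nested-guest handlers that follow all share this shape: if the
 * nested-hypervisor enabled the corresponding intercept, reflect the VM-exit to it via IEM;
 * otherwise handle the exit as an ordinary guest exit using the regular handler.
 */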
10113
10114
10115/**
10116 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
10117 */
10118HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10119{
10120 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10121
10122 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
10123 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10124    return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
10125}
10126
10127
10128/**
10129 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
10130 * Unconditional VM-exit.
10131 */
10132HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10133{
10134 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10135
10136 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10137 | HMVMX_READ_EXIT_INSTR_LEN
10138 | HMVMX_READ_IDT_VECTORING_INFO
10139 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10140
10141 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10142 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10143 pVmxTransient->uIdtVectoringErrorCode);
10144 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
10145}
10146
10147
10148/**
10149 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10150 */
10151HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10152{
10153 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10154
10155 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
10156 {
10157 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10158 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10159 }
10160 return vmxHCExitHlt(pVCpu, pVmxTransient);
10161}
10162
10163
10164/**
10165 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10166 */
10167HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10168{
10169 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10170
10171 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10172 {
10173 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10174 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10175 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10176 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10177 }
10178 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
10179}
10180
10181
10182/**
10183 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10184 */
10185HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10186{
10187 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10188
10189 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
10190 {
10191 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10192 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10193 }
10194 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
10195}
10196
10197
10198/**
10199 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
10200 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
10201 */
10202HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10203{
10204 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10205
10206 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
10207 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
10208
10209 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10210
10211 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
10212 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10213 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10214
10215 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
10216 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
10217 u64VmcsField &= UINT64_C(0xffffffff);
10218
10219 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
10220 {
10221 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10222 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10223 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10224 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10225 }
10226
10227 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
10228 return vmxHCExitVmread(pVCpu, pVmxTransient);
10229 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
10230}
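/*
 * Illustrative sketch of the VMREAD/VMWRITE-bitmap test behind
 * CPUMIsGuestVmxVmreadVmwriteInterceptSet() above, for the VMCS-shadowing case: a field
 * encoding with any of bits 63:15 set always causes a VM-exit, otherwise bit
 * (field & 0x7fff) of the respective 4K bitmap decides (Intel SDM). Hypothetical helper,
 * not used by the handler.
 */
DECLINLINE(bool) vmxSketchIsVmreadVmwriteIntercepted(uint8_t const *pbBitmap, uint64_t u64VmcsField)
{
    if (u64VmcsField & ~(uint64_t)0x7fff)               /* Bits 63:15 set => unconditional VM-exit. */
        return true;
    return ASMBitTest(pbBitmap, (int32_t)u64VmcsField); /* One bit per field encoding in the 4K page. */
}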
10231
10232
10233/**
10234 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10235 */
10236HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10237{
10238 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10239
10240 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10241 {
10242 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10243 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10244 }
10245
10246 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
10247}
10248
10249
10250/**
10251 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
10252 * Conditional VM-exit.
10253 */
10254HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10255{
10256 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10257
10258 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10259 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10260
10261 VBOXSTRICTRC rcStrict;
10262 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
10263 switch (uAccessType)
10264 {
10265 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
10266 {
10267 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10268 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10269 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10270 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10271
10272 bool fIntercept;
10273 switch (iCrReg)
10274 {
10275 case 0:
10276 case 4:
10277 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
10278 break;
10279
10280 case 3:
10281 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
10282 break;
10283
10284 case 8:
10285 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
10286 break;
10287
10288 default:
10289 fIntercept = false;
10290 break;
10291 }
10292 if (fIntercept)
10293 {
10294 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10295 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10296 }
10297 else
10298 {
10299 int const rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10300 AssertRCReturn(rc, rc);
10301 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10302 }
10303 break;
10304 }
10305
10306 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
10307 {
10308 /*
10309 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
10310 * CR2 reads do not cause a VM-exit.
10311 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
10312 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
10313 */
10314 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10315 if ( iCrReg == 3
10316 || iCrReg == 8)
10317 {
10318 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
10319 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
10320 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
10321 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
10322 {
10323 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10324 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10325 }
10326 else
10327 {
10328 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10329 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10330 }
10331 }
10332 else
10333 {
10334 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
10335 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
10336 }
10337 break;
10338 }
10339
10340 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
10341 {
10342 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
10343 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
10344 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
10345 if ( (uGstHostMask & X86_CR0_TS)
10346 && (uReadShadow & X86_CR0_TS))
10347 {
10348 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10349 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10350 }
10351 else
10352 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
10353 break;
10354 }
10355
10356 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10357 {
10358 RTGCPTR GCPtrEffDst;
10359 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
10360 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
10361 if (fMemOperand)
10362 {
10363 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10364 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
10365 }
10366 else
10367 GCPtrEffDst = NIL_RTGCPTR;
10368
10369 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
10370 {
10371 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10372 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
10373 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10374 }
10375 else
10376 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
10377 break;
10378 }
10379
10380 default:
10381 {
10382 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
10383 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
10384 }
10385 }
10386
10387 if (rcStrict == VINF_IEM_RAISED_XCPT)
10388 {
10389 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10390 rcStrict = VINF_SUCCESS;
10391 }
10392 return rcStrict;
10393}
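/*
 * Illustrative sketch of the MOV-to-CR0/CR4 write-intercept rule applied by
 * CPUMIsGuestVmxMovToCr0Cr4InterceptSet() in the write case above: the nested-hypervisor gets
 * a VM-exit when the write would flip a bit it owns, i.e. a bit set in the CR0/CR4 guest/host
 * mask whose new value differs from the read shadow (Intel SDM). Hypothetical simplification;
 * the real check also accounts for the CR0/CR4 fixed bits.
 */
DECLINLINE(bool) vmxSketchIsMovToCrXIntercepted(uint64_t uGstHostMask, uint64_t uReadShadow, uint64_t uNewCrX)
{
    /* Owned bits that the write attempts to change relative to the read shadow. */
    return RT_BOOL((uNewCrX ^ uReadShadow) & uGstHostMask);
}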
10394
10395
10396/**
10397 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
10398 * Conditional VM-exit.
10399 */
10400HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10401{
10402 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10403
10404 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
10405 {
10406 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10407 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10408 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10409 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10410 }
10411 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
10412}
10413
10414
10415/**
10416 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
10417 * Conditional VM-exit.
10418 */
10419HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10420{
10421 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10422
10423 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10424
10425 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
10426 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
10427 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
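    /* Exit-qualification size encoding: 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes; 2 is undefined. */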
10428
10429 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
10430 uint8_t const cbAccess = s_aIOSizes[uIOSize];
10431 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
10432 {
10433 /*
10434 * IN/OUT instruction:
10435 * - Provides VM-exit instruction length.
10436 *
10437 * INS/OUTS instruction:
10438 * - Provides VM-exit instruction length.
10439 * - Provides Guest-linear address.
10440 * - Optionally provides VM-exit instruction info (depends on CPU feature).
10441 */
10442 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10443 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10444
10445        /* Make sure we don't use stale/uninitialized VMX-transient info below. */
10446 pVmxTransient->ExitInstrInfo.u = 0;
10447 pVmxTransient->uGuestLinearAddr = 0;
10448
10449 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
10450 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
10451 if (fIOString)
10452 {
10453 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10454 if (fVmxInsOutsInfo)
10455 {
10456 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
10457 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10458 }
10459 }
10460
10461 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
10462 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10463 }
10464 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
10465}
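/*
 * Illustrative sketch of the I/O-bitmap lookup done by CPUMIsGuestVmxIoInterceptSet() above:
 * with "use I/O bitmaps" active, each port has one bit across two 4K pages (A: ports
 * 0x0000-0x7fff, B: ports 0x8000-0xffff) and a multi-byte access intercepts if any covered
 * port's bit is set; an access wrapping past port 0xffff always intercepts (Intel SDM).
 * Hypothetical helper, not used by the handler.
 */
DECLINLINE(bool) vmxSketchIsIoIntercepted(uint8_t const *pbIoBitmapA, uint8_t const *pbIoBitmapB,
                                          uint16_t uPort, uint8_t cbAccess)
{
    if ((uint32_t)uPort + cbAccess - 1 > 0xffff)        /* Wrapping past port 0xffff always VM-exits. */
        return true;
    for (uint32_t i = 0; i < cbAccess; i++)
    {
        uint32_t const uThisPort  = uPort + i;
        uint8_t const *pbBitmap   = uThisPort < 0x8000 ? pbIoBitmapA : pbIoBitmapB;
        if (ASMBitTest(pbBitmap, (int32_t)(uThisPort & 0x7fff)))
            return true;
    }
    return false;
}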
10466
10467
10468/**
10469 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10470 */
10471HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10472{
10473 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10474
10475 uint32_t fMsrpm;
10476 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10477 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10478 else
10479 fMsrpm = VMXMSRPM_EXIT_RD;
10480
10481 if (fMsrpm & VMXMSRPM_EXIT_RD)
10482 {
10483 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10484 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10485 }
10486 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
10487}
10488
10489
10490/**
10491 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10492 */
10493HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10494{
10495 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10496
10497 uint32_t fMsrpm;
10498 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10499 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10500 else
10501 fMsrpm = VMXMSRPM_EXIT_WR;
10502
10503 if (fMsrpm & VMXMSRPM_EXIT_WR)
10504 {
10505 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10506 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10507 }
10508 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
10509}
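/*
 * Illustrative sketch of the MSR-bitmap layout consulted by CPUMGetVmxMsrPermission() in the
 * two handlers above: the 4K page holds four 1K regions (read-low at 0K, read-high at 1K,
 * write-low at 2K, write-high at 3K), the low regions covering MSRs 0x00000000-0x00001fff and
 * the high regions 0xc0000000-0xc0001fff; an MSR outside both ranges always VM-exits (Intel
 * SDM). Hypothetical helper mirroring only the VMXMSRPM_EXIT_RD/WR bits; the real function
 * also reports the allow bits. Not used by the code.
 */
DECLINLINE(uint32_t) vmxSketchGetMsrPermission(uint8_t const *pbMsrBitmap, uint32_t idMsr)
{
    uint32_t offBitmap;
    if (idMsr <= UINT32_C(0x00001fff))
        offBitmap = 0;                                  /* Low MSR range, read bitmap at 0K. */
    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
        offBitmap = 0x400;                              /* High MSR range, read bitmap at 1K. */
    else
        return VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR;     /* Outside both ranges: always VM-exit. */

    int32_t const iBit   = (int32_t)(idMsr & UINT32_C(0x1fff));
    uint32_t      fMsrpm = 0;
    if (ASMBitTest(&pbMsrBitmap[offBitmap], iBit))          /* Read bitmaps at 0K/1K. */
        fMsrpm |= VMXMSRPM_EXIT_RD;
    if (ASMBitTest(&pbMsrBitmap[offBitmap + 0x800], iBit))  /* Write bitmaps at 2K/3K. */
        fMsrpm |= VMXMSRPM_EXIT_WR;
    return fMsrpm;
}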
10510
10511
10512/**
10513 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10514 */
10515HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10516{
10517 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10518
10519 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
10520 {
10521 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10522 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10523 }
10524 return vmxHCExitMwait(pVCpu, pVmxTransient);
10525}
10526
10527
10528/**
10529 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
10530 * VM-exit.
10531 */
10532HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10533{
10534 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10535
10536 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
10537 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10538 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10539 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10540}
10541
10542
10543/**
10544 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10545 */
10546HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10547{
10548 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10549
10550 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
10551 {
10552 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10553 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10554 }
10555 return vmxHCExitMonitor(pVCpu, pVmxTransient);
10556}
10557
10558
10559/**
10560 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10561 */
10562HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10563{
10564 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10565
10566 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10567 * PAUSE when executing a nested-guest? If it does not, we would not need
10568 * to check for the intercepts here. Just call VM-exit... */
10569
10570 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10571 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10572 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10573 {
10574 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10575 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10576 }
10577 return vmxHCExitPause(pVCpu, pVmxTransient);
10578}
10579
10580
10581/**
10582 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10583 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10584 */
10585HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10586{
10587 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10588
10589 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10590 {
10591 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10592 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10593 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10594 }
10595 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10596}
10597
10598
10599/**
10600 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10601 * VM-exit.
10602 */
10603HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10604{
10605 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10606
10607 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10608 | HMVMX_READ_EXIT_INSTR_LEN
10609 | HMVMX_READ_IDT_VECTORING_INFO
10610 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10611
10612 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10613
10614 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10615 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10616
10617 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10618 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10619 pVmxTransient->uIdtVectoringErrorCode);
10620 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10621}
10622
10623
10624/**
10625 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10626 * Conditional VM-exit.
10627 */
10628HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10629{
10630 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10631
10632 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10633 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10634 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10635}
10636
10637
10638/**
10639 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10640 * Conditional VM-exit.
10641 */
10642HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10643{
10644 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10645
10646 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10647 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10648 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10649}
10650
10651
10652/**
10653 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10654 */
10655HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10656{
10657 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10658
10659 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10660 {
10661 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10662 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10663 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10664 }
10665 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10666}
10667
10668
10669/**
10670 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10671 */
10672HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10673{
10674 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10675
10676 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10677 {
10678 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10679 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10680 }
10681 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10682}
10683
10684
10685/**
10686 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10687 */
10688HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10689{
10690 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10691
10692 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10693 {
10694 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10695 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10696 | HMVMX_READ_EXIT_INSTR_INFO
10697 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10698 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10699 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10700 }
10701 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10702}
10703
10704
10705/**
10706 * Nested-guest VM-exit handler for invalid-guest state
10707 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10708 */
10709HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10710{
10711 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10712
10713 /*
10714 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10715     * So if it does happen, it most likely indicates a bug in the hardware-assisted VMX code.
10716     * Treat it as an invalid guest state of the outer guest.
10717 *
10718 * When the fast path is implemented, this should be changed to cause the corresponding
10719 * nested-guest VM-exit.
10720 */
10721 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10722}
10723
10724
10725/**
10726 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10727 * and only provide the instruction length.
10728 *
10729 * Unconditional VM-exit.
10730 */
10731HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10732{
10733 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10734
10735#ifdef VBOX_STRICT
10736 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10737 switch (pVmxTransient->uExitReason)
10738 {
10739 case VMX_EXIT_ENCLS:
10740 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10741 break;
10742
10743 case VMX_EXIT_VMFUNC:
10744 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10745 break;
10746 }
10747#endif
10748
10749 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10750 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10751}
10752
10753
10754/**
10755 * Nested-guest VM-exit handler for instructions that provide instruction length as
10756 * well as more information.
10757 *
10758 * Unconditional VM-exit.
10759 */
10760HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10761{
10762 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10763
10764# ifdef VBOX_STRICT
10765 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10766 switch (pVmxTransient->uExitReason)
10767 {
10768 case VMX_EXIT_GDTR_IDTR_ACCESS:
10769 case VMX_EXIT_LDTR_TR_ACCESS:
10770 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10771 break;
10772
10773 case VMX_EXIT_RDRAND:
10774 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10775 break;
10776
10777 case VMX_EXIT_RDSEED:
10778 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10779 break;
10780
10781 case VMX_EXIT_XSAVES:
10782 case VMX_EXIT_XRSTORS:
10783 /** @todo NSTVMX: Verify XSS-bitmap. */
10784 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10785 break;
10786
10787 case VMX_EXIT_UMWAIT:
10788 case VMX_EXIT_TPAUSE:
10789 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10790 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10791 break;
10792
10793 case VMX_EXIT_LOADIWKEY:
10794 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10795 break;
10796 }
10797# endif
10798
10799 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10800 | HMVMX_READ_EXIT_INSTR_LEN
10801 | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10802 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10803 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10804}
10805
10806# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10807
10808/**
10809 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10810 * Conditional VM-exit.
10811 */
10812HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10813{
10814 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10815 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10816
10817 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10818 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10819 {
10820 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10821 | HMVMX_READ_EXIT_INSTR_LEN
10822 | HMVMX_READ_EXIT_INTERRUPTION_INFO
10823 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10824 | HMVMX_READ_IDT_VECTORING_INFO
10825 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
10826 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10827 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10828 AssertRCReturn(rc, rc);
10829
10830 /*
10831         * If it's our VM-exit, we're responsible for re-injecting any event whose delivery
10832         * might have triggered this VM-exit. If we forward the fault to the inner VMM instead,
10833         * dealing with the event becomes its problem, so we clear the recovered event below.
10834 */
10835 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10836 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10837 { /*likely*/ }
10838 else
10839 {
10840 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
10841 return rcStrict;
10842 }
10843        uint32_t const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* Paranoia; nothing below should inject events. */
10844
10845 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10846 uint64_t const uExitQual = pVmxTransient->uExitQual;
10847
10848 RTGCPTR GCPtrNestedFault;
10849 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10850 if (fIsLinearAddrValid)
10851 {
10852 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10853 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
10854 }
10855 else
10856 GCPtrNestedFault = 0;
10857
10858 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10859 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10860 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10861 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10862 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
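        /* E.g. a nested-guest write to a page that is readable but not writable in the EPT
           entry yields uErr = X86_TRAP_PF_P | X86_TRAP_PF_RW, mirroring #PF error-code semantics. */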
10863
10864 PGMPTWALK Walk;
10865 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10866 rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, pCtx, GCPhysNestedFault,
10867 fIsLinearAddrValid, GCPtrNestedFault, &Walk);
10868 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
10869 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
10870 if (RT_SUCCESS(rcStrict))
10871 return rcStrict;
10872
10873 if (fClearEventOnForward)
10874 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
10875
10876 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10877 pVmxTransient->uIdtVectoringErrorCode);
10878 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10879 {
10880 VMXVEXITINFO const ExitInfo
10881 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
10882 pVmxTransient->uExitQual,
10883 pVmxTransient->cbExitInstr,
10884 pVmxTransient->uGuestLinearAddr,
10885 pVmxTransient->uGuestPhysicalAddr);
10886 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10887 }
10888
10889 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10890 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10891 }
10892
10893 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10894}
10895
10896
10897/**
10898 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10899 * Conditional VM-exit.
10900 */
10901HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10902{
10903 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10904 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10905
10906 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10907 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10908 {
10909 vmxHCReadToTransient<HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10910 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10911 AssertRCReturn(rc, rc);
10912
10913 PGMPTWALK Walk;
10914 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10915 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10916 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, pCtx,
10917 GCPhysNestedFault, false /* fIsLinearAddrValid */,
10918 0 /* GCPtrNestedFault */, &Walk);
10919 if (RT_SUCCESS(rcStrict))
10920 {
10921 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
10922 return rcStrict;
10923 }
10924
10925 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
10926 vmxHCReadToTransient< HMVMX_READ_IDT_VECTORING_INFO
10927 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10928
10929 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10930 pVmxTransient->uIdtVectoringErrorCode);
10931 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10932 }
10933
10934 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
10935}
10936
10937# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10938
10939/** @} */
10940#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10941
10942
10943/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10944 * probes.
10945 *
10946 * The following few functions and associated structure contain the bloat
10947 * necessary for providing detailed debug events and dtrace probes as well as
10948 * reliable host side single stepping. This works on the principle of
10949 * "subclassing" the normal execution loop and workers. We replace the loop
10950 * method completely and override selected helpers to add necessary adjustments
10951 * to their core operation.
10952 *
10953 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10954 * any performance for debug and analysis features.
10955 *
10956 * @{
10957 */
10958
10959/**
10960 * Transient per-VCPU debug state of the VMCS and related info that we save/restore
10961 * in the debug run loop.
10962 */
10963typedef struct VMXRUNDBGSTATE
10964{
10965 /** The RIP we started executing at. This is for detecting that we stepped. */
10966 uint64_t uRipStart;
10967 /** The CS we started executing with. */
10968 uint16_t uCsStart;
10969
10970 /** Whether we've actually modified the 1st execution control field. */
10971 bool fModifiedProcCtls : 1;
10972 /** Whether we've actually modified the 2nd execution control field. */
10973 bool fModifiedProcCtls2 : 1;
10974 /** Whether we've actually modified the exception bitmap. */
10975 bool fModifiedXcptBitmap : 1;
10976
10977    /** We desire the CR0 guest/host mask to be cleared. */
10978 bool fClearCr0Mask : 1;
10979    /** We desire the CR4 guest/host mask to be cleared. */
10980 bool fClearCr4Mask : 1;
10981 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10982 uint32_t fCpe1Extra;
10983 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10984 uint32_t fCpe1Unwanted;
10985 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10986 uint32_t fCpe2Extra;
10987 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10988 uint32_t bmXcptExtra;
10989 /** The sequence number of the Dtrace provider settings the state was
10990 * configured against. */
10991 uint32_t uDtraceSettingsSeqNo;
10992 /** VM-exits to check (one bit per VM-exit). */
10993 uint32_t bmExitsToCheck[3];
10994
10995 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10996 uint32_t fProcCtlsInitial;
10997 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10998 uint32_t fProcCtls2Initial;
10999 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
11000 uint32_t bmXcptInitial;
11001} VMXRUNDBGSTATE;
11002AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
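/* I.e. one bit for each VM-exit reason 0..VMX_EXIT_MAX, rounded up to whole 32-bit words. */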
11003typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
11004
11005
11006/**
11007 * Initializes the VMXRUNDBGSTATE structure.
11008 *
11009 * @param pVCpu The cross context virtual CPU structure of the
11010 * calling EMT.
11011 * @param pVmxTransient The VMX-transient structure.
11012 * @param pDbgState The debug state to initialize.
11013 */
11014static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11015{
11016 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
11017 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
11018
11019 pDbgState->fModifiedProcCtls = false;
11020 pDbgState->fModifiedProcCtls2 = false;
11021 pDbgState->fModifiedXcptBitmap = false;
11022 pDbgState->fClearCr0Mask = false;
11023 pDbgState->fClearCr4Mask = false;
11024 pDbgState->fCpe1Extra = 0;
11025 pDbgState->fCpe1Unwanted = 0;
11026 pDbgState->fCpe2Extra = 0;
11027 pDbgState->bmXcptExtra = 0;
11028 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
11029 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
11030 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
11031}
11032
11033
11034/**
11035 * Updates the VMCS fields with changes requested by @a pDbgState.
11036 *
11037 * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
11038 * immediately before executing guest code, i.e. when interrupts are disabled.
11039 * We don't check status codes here as we cannot easily assert or return in the
11040 * latter case.
11041 *
11042 * @param pVCpu The cross context virtual CPU structure.
11043 * @param pVmxTransient The VMX-transient structure.
11044 * @param pDbgState The debug state.
11045 */
11046static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11047{
11048 /*
11049 * Ensure desired flags in VMCS control fields are set.
11050 * (Ignoring write failure here, as we're committed and it's just debug extras.)
11051 *
11052 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
11053 * there should be no stale data in pCtx at this point.
11054 */
11055 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11056 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
11057 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
11058 {
11059 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
11060 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
11061 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
11062 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
11063 pDbgState->fModifiedProcCtls = true;
11064 }
11065
11066 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
11067 {
11068 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
11069 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
11070 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
11071 pDbgState->fModifiedProcCtls2 = true;
11072 }
11073
11074 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
11075 {
11076 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
11077 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
11078 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
11079 pDbgState->fModifiedXcptBitmap = true;
11080 }
11081
11082 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
11083 {
11084 pVmcsInfo->u64Cr0Mask = 0;
11085 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
11086 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
11087 }
11088
11089 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
11090 {
11091 pVmcsInfo->u64Cr4Mask = 0;
11092 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
11093 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
11094 }
11095
11096 NOREF(pVCpu);
11097}
11098
11099
11100/**
11101 * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
11102 * re-entry next time around.
11103 *
11104 * @returns Strict VBox status code (i.e. informational status codes too).
11105 * @param pVCpu The cross context virtual CPU structure.
11106 * @param pVmxTransient The VMX-transient structure.
11107 * @param pDbgState The debug state.
11108 * @param rcStrict The return code from executing the guest using single
11109 * stepping.
11110 */
11111static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
11112 VBOXSTRICTRC rcStrict)
11113{
11114 /*
11115 * Restore VM-exit control settings as we may not reenter this function the
11116 * next time around.
11117 */
11118 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11119
11120    /* We reload the initial value and trigger whatever recalculations we can the
11121       next time around. From the looks of things, that's all that's required atm. */
11122 if (pDbgState->fModifiedProcCtls)
11123 {
11124 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
11125 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
11126 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
11127 AssertRC(rc2);
11128 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
11129 }
11130
11131 /* We're currently the only ones messing with this one, so just restore the
11132 cached value and reload the field. */
11133 if ( pDbgState->fModifiedProcCtls2
11134 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
11135 {
11136 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
11137 AssertRC(rc2);
11138 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
11139 }
11140
11141 /* If we've modified the exception bitmap, we restore it and trigger
11142 reloading and partial recalculation the next time around. */
11143 if (pDbgState->fModifiedXcptBitmap)
11144 {
11145 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pDbgState->bmXcptInitial);
11146 AssertRC(rc2);
11147 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
11148 }
11149
11150 return rcStrict;
11151}
11152
11153
11154/**
11155 * Configures VM-exit controls for current DBGF and DTrace settings.
11156 *
11157 * This updates @a pDbgState and the VMCS execution control fields to reflect
11158 * the necessary VM-exits demanded by DBGF and DTrace.
11159 *
11160 * @param pVCpu The cross context virtual CPU structure.
11161 * @param pVmxTransient The VMX-transient structure. May update
11162 * fUpdatedTscOffsettingAndPreemptTimer.
11163 * @param pDbgState The debug state.
11164 */
11165static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11166{
11167#ifndef IN_NEM_DARWIN
11168 /*
11169 * Take down the dtrace serial number so we can spot changes.
11170 */
11171 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
11172 ASMCompilerBarrier();
11173#endif
11174
11175 /*
11176 * We'll rebuild most of the middle block of data members (holding the
11177 * current settings) as we go along here, so start by clearing it all.
11178 */
11179 pDbgState->bmXcptExtra = 0;
11180 pDbgState->fCpe1Extra = 0;
11181 pDbgState->fCpe1Unwanted = 0;
11182 pDbgState->fCpe2Extra = 0;
11183 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
11184 pDbgState->bmExitsToCheck[i] = 0;
11185
11186 /*
11187 * Software interrupts (INT XXh) - no idea how to trigger these...
11188 */
11189 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11190 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
11191 || VBOXVMM_INT_SOFTWARE_ENABLED())
11192 {
11193 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11194 }
11195
11196 /*
11197 * INT3 breakpoints - triggered by #BP exceptions.
11198 */
11199 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
11200 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11201
11202 /*
11203 * Exception bitmap and XCPT events+probes.
11204 */
11205 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
11206 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
11207 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
11208
11209 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
11210 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
11211 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11212 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
11213 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
11214 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
11215 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
11216 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
11217 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
11218 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
11219 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
11220 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
11221 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
11222 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
11223 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
11224 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
11225 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
11226 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
11227
11228 if (pDbgState->bmXcptExtra)
11229 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11230
11231 /*
11232 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
11233 *
11234 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
11235 * So, when adding/changing/removing please don't forget to update it.
11236 *
11237 * Some of the macros are picking up local variables to save horizontal space,
11238 * (being able to see it in a table is the lesser evil here).
11239 */
11240#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
11241 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
11242 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
11243#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
11244 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11245 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11246 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11247 } else do { } while (0)
11248#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
11249 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11250 { \
11251 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
11252 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11253 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11254 } else do { } while (0)
11255#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
11256 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11257 { \
11258 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
11259 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11260 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11261 } else do { } while (0)
11262#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
11263 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11264 { \
11265 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
11266 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11267 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11268 } else do { } while (0)
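/* A worked expansion of one table entry below, assuming plain RT_CONCAT /
   RT_CONCAT3 token pasting and eliding the AssertCompile and the trailing
   else:
       SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT)
   becomes roughly:
       if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INSTR_HALT)
           || VBOXVMM_INSTR_HALT_ENABLED())
       {
           pDbgState->fCpe1Extra |= VMX_PROC_CTLS_HLT_EXIT;
           ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_HLT);
       }
   i.e. it both requests the HLT-exiting execution control and flags the HLT
   exit reason for event/probe processing. */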
11269
11270 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
11271 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
11272 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
11273 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
11274 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
11275
11276 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
11277 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
11278 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
11279 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
11280 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
11281 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
11282 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
11283 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
11284 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
11285 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
11286 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
11287 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
11288 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
11289 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
11290 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
11291 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
11292 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
11293 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
11294 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
11295 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
11296 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
11297 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
11298 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
11299 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
11300 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
11301 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
11302 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
11303 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
11304 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
11305 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
11306 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
11307 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
11308 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
11309 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
11310 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
11311 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
11312
11313 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
11314 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11315 {
11316 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo,
11317 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
11318 AssertRC(rc);
11319
11320#if 0 /** @todo fix me */
11321 pDbgState->fClearCr0Mask = true;
11322 pDbgState->fClearCr4Mask = true;
11323#endif
11324 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
11325 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
11326 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11327 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
11328 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
11329 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
11330 require clearing here and in the loop if we start using it. */
11331 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
11332 }
11333 else
11334 {
11335 if (pDbgState->fClearCr0Mask)
11336 {
11337 pDbgState->fClearCr0Mask = false;
11338 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
11339 }
11340 if (pDbgState->fClearCr4Mask)
11341 {
11342 pDbgState->fClearCr4Mask = false;
11343 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
11344 }
11345 }
11346 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
11347 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
11348
11349 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
11350 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
11351 {
11352 /** @todo later, need to fix handler as it assumes this won't usually happen. */
11353 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
11354 }
11355 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
11356 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
11357
11358 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
11359 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
11360 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
11361 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
11362 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
11363 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
11364 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
11365 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
11366#if 0 /** @todo too slow, fix handler. */
11367 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
11368#endif
11369 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
11370
11371 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
11372 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
11373 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
11374 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
11375 {
11376 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11377 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
11378 }
11379 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11380 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11381 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11382 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11383
11384 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
11385 || IS_EITHER_ENABLED(pVM, INSTR_STR)
11386 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
11387 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
11388 {
11389 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11390 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
11391 }
11392 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
11393 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
11394 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
11395 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
11396
11397 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
11398 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
11399 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
11400 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
11401 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
11402 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
11403 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
11404 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
11405 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
11406 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
11407 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
11408 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
11409 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
11410 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
11411 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
11412 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
11413 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
11414 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
11415 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
11416 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
11417 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
11418 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
11419
11420#undef IS_EITHER_ENABLED
11421#undef SET_ONLY_XBM_IF_EITHER_EN
11422#undef SET_CPE1_XBM_IF_EITHER_EN
11423#undef SET_CPEU_XBM_IF_EITHER_EN
11424#undef SET_CPE2_XBM_IF_EITHER_EN
11425
11426 /*
11427 * Sanitize the control stuff.
11428 */
11429 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
11430 if (pDbgState->fCpe2Extra)
11431 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
11432 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
11433 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
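    /* Sketch of the masking semantics assumed above: in these capability
       values, allowed1 has a bit set if the control may be 1, while allowed0
       has a bit set if the control is fixed to 1. Thus:
           fCpe1Extra    &= allowed1;    -> drop controls the CPU cannot enable
           fCpe1Unwanted &= ~allowed0;   -> drop bits we cannot clear anyway */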
11434#ifndef IN_NEM_DARWIN
11435 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11436 {
11437 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
11438 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11439 }
11440#else
11441 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11442 {
11443 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
11444 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11445 }
11446#endif
11447
11448 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
11449 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
11450 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
11451 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
11452}
11453
11454
11455/**
11456 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
11457 * appropriate.
11458 *
11459 * The caller has already checked the VM-exit against the
11460 * VMXRUNDBGSTATE::bmExitsToCheck bitmap and has dealt with NMIs,
11461 * so we don't need to do either here.
11462 *
11463 * @returns Strict VBox status code (i.e. informational status codes too).
11464 * @param pVCpu The cross context virtual CPU structure.
11465 * @param pVmxTransient The VMX-transient structure.
11466 * @param uExitReason The VM-exit reason.
11467 *
11468 * @remarks The name of this function is displayed by dtrace, so keep it short
11469 * and to the point. No longer than 33 chars, please.
11470 */
11471static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11472{
11473 /*
11474 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11475 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11476 *
11477 * Note! This is the reverse operation of what vmxHCPreRunGuestDebugStateUpdate
11478 * does. Add/change/remove in both places, same ordering, please.
11479 *
11480 * Added/removed events must also be reflected in the next section
11481 * where we dispatch dtrace events.
11482 */
11483 bool fDtrace1 = false;
11484 bool fDtrace2 = false;
11485 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11486 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11487 uint32_t uEventArg = 0;
11488#define SET_EXIT(a_EventSubName) \
11489 do { \
11490 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11491 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11492 } while (0)
11493#define SET_BOTH(a_EventSubName) \
11494 do { \
11495 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11496 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11497 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11498 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11499 } while (0)
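    /* For reference, SET_BOTH(CPUID) pastes tokens into:
           enmEvent1 = DBGFEVENT_INSTR_CPUID;
           enmEvent2 = DBGFEVENT_EXIT_CPUID;
           fDtrace1  = VBOXVMM_INSTR_CPUID_ENABLED();
           fDtrace2  = VBOXVMM_EXIT_CPUID_ENABLED();
       so a single instruction-style exit can raise both an INSTR and an EXIT
       event/probe. */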
11500 switch (uExitReason)
11501 {
11502 case VMX_EXIT_MTF:
11503 return vmxHCExitMtf(pVCpu, pVmxTransient);
11504
11505 case VMX_EXIT_XCPT_OR_NMI:
11506 {
11507 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11508 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11509 {
11510 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11511 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11512 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11513 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11514 {
11515 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11516 {
11517 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE>(pVCpu, pVmxTransient);
11518 uEventArg = pVmxTransient->uExitIntErrorCode;
11519 }
11520 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11521 switch (enmEvent1)
11522 {
11523 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11524 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11525 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11526 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11527 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11528 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11529 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11530 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11531 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11532 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11533 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11534 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11535 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11536 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11537 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11538 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11539 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11540 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11541 default: break;
11542 }
11543 }
11544 else
11545 AssertFailed();
11546 break;
11547
11548 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11549 uEventArg = idxVector;
11550 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11551 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11552 break;
11553 }
11554 break;
11555 }
11556
11557 case VMX_EXIT_TRIPLE_FAULT:
11558 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11559 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11560 break;
11561 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11562 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11563 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11564 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11565 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11566
11567 /* Instruction specific VM-exits: */
11568 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11569 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11570 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11571 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11572 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11573 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11574 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11575 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11576 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11577 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11578 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11579 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11580 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11581 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11582 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11583 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11584 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11585 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11586 case VMX_EXIT_MOV_CRX:
11587 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11588 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11589 SET_BOTH(CRX_READ);
11590 else
11591 SET_BOTH(CRX_WRITE);
11592 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11593 break;
11594 case VMX_EXIT_MOV_DRX:
11595 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11596 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11597 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11598 SET_BOTH(DRX_READ);
11599 else
11600 SET_BOTH(DRX_WRITE);
11601 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11602 break;
11603 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11604 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11605 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11606 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11607 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11608 case VMX_EXIT_GDTR_IDTR_ACCESS:
11609 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11610 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11611 {
11612 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11613 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11614 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11615 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11616 }
11617 break;
11618
11619 case VMX_EXIT_LDTR_TR_ACCESS:
11620 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11621 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11622 {
11623 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11624 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11625 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11626 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11627 }
11628 break;
11629
11630 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11631 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11632 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11633 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11634 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11635 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11636 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11637 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11638 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11639 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11640 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11641
11642 /* Events that aren't relevant at this point. */
11643 case VMX_EXIT_EXT_INT:
11644 case VMX_EXIT_INT_WINDOW:
11645 case VMX_EXIT_NMI_WINDOW:
11646 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11647 case VMX_EXIT_PREEMPT_TIMER:
11648 case VMX_EXIT_IO_INSTR:
11649 break;
11650
11651 /* Errors and unexpected events. */
11652 case VMX_EXIT_INIT_SIGNAL:
11653 case VMX_EXIT_SIPI:
11654 case VMX_EXIT_IO_SMI:
11655 case VMX_EXIT_SMI:
11656 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11657 case VMX_EXIT_ERR_MSR_LOAD:
11658 case VMX_EXIT_ERR_MACHINE_CHECK:
11659 case VMX_EXIT_PML_FULL:
11660 case VMX_EXIT_VIRTUALIZED_EOI:
11661 break;
11662
11663 default:
11664 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11665 break;
11666 }
11667#undef SET_BOTH
11668#undef SET_EXIT
11669
11670 /*
11671 * Dtrace tracepoints go first. We do them all here at once so we don't
11672 * have to duplicate the guest-state saving and related code a few dozen times.
11673 * The downside is that we've got to repeat the switch, though this time
11674 * we use enmEvent since the probes are a subset of what DBGF does.
11675 */
11676 if (fDtrace1 || fDtrace2)
11677 {
11678 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11679 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11680 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11681 switch (enmEvent1)
11682 {
11683 /** @todo consider which extra parameters would be helpful for each probe. */
11684 case DBGFEVENT_END: break;
11685 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11686 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11687 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11688 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11689 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11690 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11691 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11692 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11693 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11694 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11695 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11696 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11697 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11698 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11699 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11700 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11701 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11702 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11703 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11704 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11705 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11706 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11707 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11708 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11709 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11710 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11711 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11712 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11713 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11714 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11715 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11716 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11717 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11718 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11719 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11720 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11721 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11722 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11723 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11724 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11725 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11726 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11727 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11728 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11729 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11730 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11731 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11732 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11733 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11734 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11735 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11736 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11737 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11738 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11739 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11740 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11741 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11742 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11743 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11744 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11745 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11746 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11747 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11748 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11749 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11750 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11751 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11752 }
11753 switch (enmEvent2)
11754 {
11755 /** @todo consider which extra parameters would be helpful for each probe. */
11756 case DBGFEVENT_END: break;
11757 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11758 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11759 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11760 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11761 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11762 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11763 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11764 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11765 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11766 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11767 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11768 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11769 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11770 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11771 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11772 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11773 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11774 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11775 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11776 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11777 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11778 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11779 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11780 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11781 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11782 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11783 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11784 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11785 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11786 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11787 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11788 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11789 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11790 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11791 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11792 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11793 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11794 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11795 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11796 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11797 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11798 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11799 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11800 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11801 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11802 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11803 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11804 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11805 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11806 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11807 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11808 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11809 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11810 }
11811 }
11812
11813 /*
11814 * Fire off the DBGF event, if enabled (our check here is just a quick one,
11815 * the DBGF call will do a full check).
11816 *
11817 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11818 * Note! If we have two events, we prioritize the first, i.e. the instruction
11819 * one, in order to avoid event nesting.
11820 */
11821 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11822 if ( enmEvent1 != DBGFEVENT_END
11823 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11824 {
11825 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11826 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11827 if (rcStrict != VINF_SUCCESS)
11828 return rcStrict;
11829 }
11830 else if ( enmEvent2 != DBGFEVENT_END
11831 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11832 {
11833 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11834 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11835 if (rcStrict != VINF_SUCCESS)
11836 return rcStrict;
11837 }
11838
11839 return VINF_SUCCESS;
11840}
11841
11842
11843/**
11844 * Single-stepping VM-exit filtering.
11845 *
11846 * This preprocesses the VM-exits and decides whether we've gotten far
11847 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11848 * handling is performed.
11849 *
11850 * @returns Strict VBox status code (i.e. informational status codes too).
11851 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11852 * @param pVmxTransient The VMX-transient structure.
11853 * @param pDbgState The debug state.
11854 */
11855DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11856{
11857 /*
11858 * Expensive (saves context) generic dtrace VM-exit probe.
11859 */
11860 uint32_t const uExitReason = pVmxTransient->uExitReason;
11861 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11862 { /* more likely */ }
11863 else
11864 {
11865 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11866 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11867 AssertRC(rc);
11868 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11869 }
11870
11871#ifndef IN_NEM_DARWIN
11872 /*
11873 * Check for host NMI, just to get that out of the way.
11874 */
11875 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11876 { /* normally likely */ }
11877 else
11878 {
11879 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
11880 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11881 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11882 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11883 }
11884#endif
11885
11886 /*
11887 * Check for single stepping event if we're stepping.
11888 */
11889 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11890 {
11891 switch (uExitReason)
11892 {
11893 case VMX_EXIT_MTF:
11894 return vmxHCExitMtf(pVCpu, pVmxTransient);
11895
11896 /* Various events: */
11897 case VMX_EXIT_XCPT_OR_NMI:
11898 case VMX_EXIT_EXT_INT:
11899 case VMX_EXIT_TRIPLE_FAULT:
11900 case VMX_EXIT_INT_WINDOW:
11901 case VMX_EXIT_NMI_WINDOW:
11902 case VMX_EXIT_TASK_SWITCH:
11903 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11904 case VMX_EXIT_APIC_ACCESS:
11905 case VMX_EXIT_EPT_VIOLATION:
11906 case VMX_EXIT_EPT_MISCONFIG:
11907 case VMX_EXIT_PREEMPT_TIMER:
11908
11909 /* Instruction specific VM-exits: */
11910 case VMX_EXIT_CPUID:
11911 case VMX_EXIT_GETSEC:
11912 case VMX_EXIT_HLT:
11913 case VMX_EXIT_INVD:
11914 case VMX_EXIT_INVLPG:
11915 case VMX_EXIT_RDPMC:
11916 case VMX_EXIT_RDTSC:
11917 case VMX_EXIT_RSM:
11918 case VMX_EXIT_VMCALL:
11919 case VMX_EXIT_VMCLEAR:
11920 case VMX_EXIT_VMLAUNCH:
11921 case VMX_EXIT_VMPTRLD:
11922 case VMX_EXIT_VMPTRST:
11923 case VMX_EXIT_VMREAD:
11924 case VMX_EXIT_VMRESUME:
11925 case VMX_EXIT_VMWRITE:
11926 case VMX_EXIT_VMXOFF:
11927 case VMX_EXIT_VMXON:
11928 case VMX_EXIT_MOV_CRX:
11929 case VMX_EXIT_MOV_DRX:
11930 case VMX_EXIT_IO_INSTR:
11931 case VMX_EXIT_RDMSR:
11932 case VMX_EXIT_WRMSR:
11933 case VMX_EXIT_MWAIT:
11934 case VMX_EXIT_MONITOR:
11935 case VMX_EXIT_PAUSE:
11936 case VMX_EXIT_GDTR_IDTR_ACCESS:
11937 case VMX_EXIT_LDTR_TR_ACCESS:
11938 case VMX_EXIT_INVEPT:
11939 case VMX_EXIT_RDTSCP:
11940 case VMX_EXIT_INVVPID:
11941 case VMX_EXIT_WBINVD:
11942 case VMX_EXIT_XSETBV:
11943 case VMX_EXIT_RDRAND:
11944 case VMX_EXIT_INVPCID:
11945 case VMX_EXIT_VMFUNC:
11946 case VMX_EXIT_RDSEED:
11947 case VMX_EXIT_XSAVES:
11948 case VMX_EXIT_XRSTORS:
11949 {
11950 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11951 AssertRCReturn(rc, rc);
11952 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11953 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11954 return VINF_EM_DBG_STEPPED;
11955 break;
11956 }
11957
11958 /* Errors and unexpected events: */
11959 case VMX_EXIT_INIT_SIGNAL:
11960 case VMX_EXIT_SIPI:
11961 case VMX_EXIT_IO_SMI:
11962 case VMX_EXIT_SMI:
11963 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11964 case VMX_EXIT_ERR_MSR_LOAD:
11965 case VMX_EXIT_ERR_MACHINE_CHECK:
11966 case VMX_EXIT_PML_FULL:
11967 case VMX_EXIT_VIRTUALIZED_EOI:
11968 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault-like, so presumably we must process it. */
11969 break;
11970
11971 default:
11972 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11973 break;
11974 }
11975 }
11976
11977 /*
11978 * Check for debugger event breakpoints and dtrace probes.
11979 */
11980 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11981 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11982 {
11983 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11984 if (rcStrict != VINF_SUCCESS)
11985 return rcStrict;
11986 }
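    /* A minimal sketch of what the guard above amounts to, assuming the usual
       32-bit word bitmap layout behind ASMBitSet/ASMBitTest:
           uExitReason < RT_ELEMENTS(bmExitsToCheck) * 32
        && (bmExitsToCheck[uExitReason / 32] & RT_BIT_32(uExitReason % 32))
       i.e. a cheap range check followed by a single bit test. */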
11987
11988 /*
11989 * Normal processing.
11990 */
11991#ifdef HMVMX_USE_FUNCTION_TABLE
11992 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11993#else
11994 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11995#endif
11996}
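/* Usage sketch (an assumption about the caller, not part of this function):
   the debug run-loop is expected to dispatch each VM-exit through this filter
   instead of calling the normal handlers directly, along the lines of:
       VBOXSTRICTRC rcStrict = vmxHCRunDebugHandleExit(pVCpu, &VmxTransient, &DbgState);
       if (rcStrict != VINF_SUCCESS)
           break;  // VINF_EM_DBG_STEPPED and friends bubble up to the caller
   where DbgState is the VMXRUNDBGSTATE prepared by the code above. */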
11997
11998/** @} */