source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@97698

Last change on this file since 97698 was 97614, checked in by vboxsync on 2022-11-19

VMM/HMVMXR0: Must actually reload the initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP in vmxHCRunDebugStateRevert or we'll assert later in vmxHCCheckCachedVmcsCtls.

1/* $Id: VMXAllTemplate.cpp.h 97614 2022-11-19 23:53:25Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
39#endif
40
41/** Enables condensing of VMREAD instructions, see vmxHCReadToTransient(). */
42#define HMVMX_WITH_CONDENSED_VMREADS
43
44/** Use the function table. */
45#define HMVMX_USE_FUNCTION_TABLE
46
47/** Determine which tagged-TLB flush handler to use. */
48#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
49#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
50#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
51#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
52
53/** Assert that all the given fields have been read from the VMCS. */
54#ifdef VBOX_STRICT
55# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
56 do { \
57 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead); \
58 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
59 } while (0)
60#else
61# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
62#endif
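/* Illustrative usage (a minimal sketch, assuming an exit handler that consumes the
 * exit qualification and therefore asserts it was read first):
 *     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
 */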
63
64/**
65 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
66 * guest using hardware-assisted VMX.
67 *
68 * This excludes state like GPRs (other than RSP), which are always swapped
69 * and restored across the world-switch, and also registers like the EFER
70 * MSR, which cannot be modified by the guest without causing a VM-exit.
71 */
72#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
73 | CPUMCTX_EXTRN_RFLAGS \
74 | CPUMCTX_EXTRN_RSP \
75 | CPUMCTX_EXTRN_SREG_MASK \
76 | CPUMCTX_EXTRN_TABLE_MASK \
77 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
78 | CPUMCTX_EXTRN_SYSCALL_MSRS \
79 | CPUMCTX_EXTRN_SYSENTER_MSRS \
80 | CPUMCTX_EXTRN_TSC_AUX \
81 | CPUMCTX_EXTRN_OTHER_MSRS \
82 | CPUMCTX_EXTRN_CR0 \
83 | CPUMCTX_EXTRN_CR3 \
84 | CPUMCTX_EXTRN_CR4 \
85 | CPUMCTX_EXTRN_DR7 \
86 | CPUMCTX_EXTRN_HWVIRT \
87 | CPUMCTX_EXTRN_INHIBIT_INT \
88 | CPUMCTX_EXTRN_INHIBIT_NMI)
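/* Illustrative usage (a minimal sketch): callers assert that this entire subset has been
 * imported before relying on it, as done further down in this file:
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
 */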
89
90/**
91 * Exception bitmap mask for real-mode guests (real-on-v86).
92 *
93 * We need to intercept all exceptions manually, except:
94 * - \#AC and \#DB, which are always intercepted anyway to prevent the CPU
95 *   from deadlocking due to bugs in Intel CPUs.
96 * - \#PF, which need not be intercepted even in real-mode if we have nested
97 *   paging support.
98 */
99#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
100 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
101 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
102 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
103 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
104 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
105 | RT_BIT(X86_XCPT_XF))
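/* Illustrative usage (a sketch; the exact call site is assumed): a real-on-v86 setup path
 * would merge this mask into the exception bitmap before committing it to the VMCS:
 *     uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
 */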
106
107/** Maximum VM-instruction error number. */
108#define HMVMX_INSTR_ERROR_MAX 28
109
110/** Profiling macro. */
111#ifdef HM_PROFILE_EXIT_DISPATCH
112# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
113# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
114#else
115# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
116# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
117#endif
118
119#ifndef IN_NEM_DARWIN
120/** Assert that preemption is disabled or covered by thread-context hooks. */
121# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
122 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
123
124/** Assert that we haven't migrated CPUs when thread-context hooks are not
125 * used. */
126# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
127 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
128 ("Illegal migration! Entered on CPU %u Current %u\n", \
129 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
130#else
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
132# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
133#endif
134
135/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
136 * context. */
137#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
138 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
139 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
140
141/** Log the VM-exit reason with an easily visible marker to identify it in a
142 * potential sea of logging data. */
143#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
144 do { \
145 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
146 HMGetVmxExitName(a_uExitReason))); \
147 } while (0) \
148
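/* Illustrative usage (a minimal sketch, assuming uExitReason holds the basic exit reason):
 *     HMVMX_LOG_EXIT(pVCpu, uExitReason);
 */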
149
150/*********************************************************************************************************************************
151* Structures and Typedefs *
152*********************************************************************************************************************************/
153/**
154 * Memory operand read or write access.
155 */
156typedef enum VMXMEMACCESS
157{
158 VMXMEMACCESS_READ = 0,
159 VMXMEMACCESS_WRITE = 1
160} VMXMEMACCESS;
161
162
163/**
164 * VMX VM-exit handler.
165 *
166 * @returns Strict VBox status code (i.e. informational status codes too).
167 * @param pVCpu The cross context virtual CPU structure.
168 * @param pVmxTransient The VMX-transient structure.
169 */
170#ifndef HMVMX_USE_FUNCTION_TABLE
171typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
172#else
173typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
174/** Pointer to VM-exit handler. */
175typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
176#endif
177
178/**
179 * VMX VM-exit handler, non-strict status code.
180 *
181 * This is generally the same as FNVMXEXITHANDLER, the NSRC bit is just FYI.
182 *
183 * @returns VBox status code, no informational status code returned.
184 * @param pVCpu The cross context virtual CPU structure.
185 * @param pVmxTransient The VMX-transient structure.
186 *
187 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
188 * use of that status code will be replaced with VINF_EM_SOMETHING
189 * later when switching over to IEM.
190 */
191#ifndef HMVMX_USE_FUNCTION_TABLE
192typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
193#else
194typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
195#endif
196
197
198/*********************************************************************************************************************************
199* Internal Functions *
200*********************************************************************************************************************************/
201#ifndef HMVMX_USE_FUNCTION_TABLE
202DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
204# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
205#else
206# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
207# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
208#endif
209#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
210DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
211#endif
212
213static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
214
215/** @name VM-exit handler prototypes.
216 * @{
217 */
218static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
219static FNVMXEXITHANDLER vmxHCExitExtInt;
220static FNVMXEXITHANDLER vmxHCExitTripleFault;
221static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
222static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
223static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
224static FNVMXEXITHANDLER vmxHCExitCpuid;
225static FNVMXEXITHANDLER vmxHCExitGetsec;
226static FNVMXEXITHANDLER vmxHCExitHlt;
227static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
228static FNVMXEXITHANDLER vmxHCExitInvlpg;
229static FNVMXEXITHANDLER vmxHCExitRdpmc;
230static FNVMXEXITHANDLER vmxHCExitVmcall;
231#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
232static FNVMXEXITHANDLER vmxHCExitVmclear;
233static FNVMXEXITHANDLER vmxHCExitVmlaunch;
234static FNVMXEXITHANDLER vmxHCExitVmptrld;
235static FNVMXEXITHANDLER vmxHCExitVmptrst;
236static FNVMXEXITHANDLER vmxHCExitVmread;
237static FNVMXEXITHANDLER vmxHCExitVmresume;
238static FNVMXEXITHANDLER vmxHCExitVmwrite;
239static FNVMXEXITHANDLER vmxHCExitVmxoff;
240static FNVMXEXITHANDLER vmxHCExitVmxon;
241static FNVMXEXITHANDLER vmxHCExitInvvpid;
242# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
243static FNVMXEXITHANDLER vmxHCExitInvept;
244# endif
245#endif
246static FNVMXEXITHANDLER vmxHCExitRdtsc;
247static FNVMXEXITHANDLER vmxHCExitMovCRx;
248static FNVMXEXITHANDLER vmxHCExitMovDRx;
249static FNVMXEXITHANDLER vmxHCExitIoInstr;
250static FNVMXEXITHANDLER vmxHCExitRdmsr;
251static FNVMXEXITHANDLER vmxHCExitWrmsr;
252static FNVMXEXITHANDLER vmxHCExitMwait;
253static FNVMXEXITHANDLER vmxHCExitMtf;
254static FNVMXEXITHANDLER vmxHCExitMonitor;
255static FNVMXEXITHANDLER vmxHCExitPause;
256static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
257static FNVMXEXITHANDLER vmxHCExitApicAccess;
258static FNVMXEXITHANDLER vmxHCExitEptViolation;
259static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
260static FNVMXEXITHANDLER vmxHCExitRdtscp;
261static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
262static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
263static FNVMXEXITHANDLER vmxHCExitXsetbv;
264static FNVMXEXITHANDLER vmxHCExitInvpcid;
265#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
266static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
267#endif
268static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
269static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
270/** @} */
271
272#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
273/** @name Nested-guest VM-exit handler prototypes.
274 * @{
275 */
276static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
277static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
278static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
279static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
280static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
281static FNVMXEXITHANDLER vmxHCExitHltNested;
282static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
283static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
284static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
285static FNVMXEXITHANDLER vmxHCExitRdtscNested;
286static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
287static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
288static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
289static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
290static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
291static FNVMXEXITHANDLER vmxHCExitMwaitNested;
292static FNVMXEXITHANDLER vmxHCExitMtfNested;
293static FNVMXEXITHANDLER vmxHCExitMonitorNested;
294static FNVMXEXITHANDLER vmxHCExitPauseNested;
295static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
296static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
297static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
298static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
299static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
301static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
302static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
303static FNVMXEXITHANDLER vmxHCExitInstrNested;
304static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
305# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
306static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
307static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
308# endif
309/** @} */
310#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
311
312
313/*********************************************************************************************************************************
314* Global Variables *
315*********************************************************************************************************************************/
316#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
317/**
318 * Array of all VMCS fields.
319 * Any fields added to the VT-x spec. should be added here.
320 *
321 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
322 * of nested-guests.
323 */
324static const uint32_t g_aVmcsFields[] =
325{
326 /* 16-bit control fields. */
327 VMX_VMCS16_VPID,
328 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
329 VMX_VMCS16_EPTP_INDEX,
330 VMX_VMCS16_HLAT_PREFIX_SIZE,
331
332 /* 16-bit guest-state fields. */
333 VMX_VMCS16_GUEST_ES_SEL,
334 VMX_VMCS16_GUEST_CS_SEL,
335 VMX_VMCS16_GUEST_SS_SEL,
336 VMX_VMCS16_GUEST_DS_SEL,
337 VMX_VMCS16_GUEST_FS_SEL,
338 VMX_VMCS16_GUEST_GS_SEL,
339 VMX_VMCS16_GUEST_LDTR_SEL,
340 VMX_VMCS16_GUEST_TR_SEL,
341 VMX_VMCS16_GUEST_INTR_STATUS,
342 VMX_VMCS16_GUEST_PML_INDEX,
343
344 /* 16-bit host-state fields. */
345 VMX_VMCS16_HOST_ES_SEL,
346 VMX_VMCS16_HOST_CS_SEL,
347 VMX_VMCS16_HOST_SS_SEL,
348 VMX_VMCS16_HOST_DS_SEL,
349 VMX_VMCS16_HOST_FS_SEL,
350 VMX_VMCS16_HOST_GS_SEL,
351 VMX_VMCS16_HOST_TR_SEL,
352
353 /* 64-bit control fields. */
354 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
355 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
356 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
357 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
358 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
359 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
360 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
361 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
362 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
363 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
364 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
365 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
366 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
367 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
368 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
369 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
370 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
371 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
372 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
373 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
374 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
375 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
376 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
377 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
378 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
379 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
380 VMX_VMCS64_CTRL_EPTP_FULL,
381 VMX_VMCS64_CTRL_EPTP_HIGH,
382 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
383 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
384 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
385 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
386 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
387 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
388 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
389 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
390 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
391 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
392 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
393 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
394 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
395 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
396 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
397 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
398 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
399 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
400 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
401 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
402 VMX_VMCS64_CTRL_SPPTP_FULL,
403 VMX_VMCS64_CTRL_SPPTP_HIGH,
404 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
405 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
406 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
407 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
408 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
409 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
410 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_FULL,
411 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_HIGH,
412 VMX_VMCS64_CTRL_HLAT_PTR_FULL,
413 VMX_VMCS64_CTRL_HLAT_PTR_HIGH,
414 VMX_VMCS64_CTRL_EXIT2_FULL,
415 VMX_VMCS64_CTRL_EXIT2_HIGH,
416
417 /* 64-bit read-only data fields. */
418 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
419 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
420
421 /* 64-bit guest-state fields. */
422 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
423 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
424 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
425 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
426 VMX_VMCS64_GUEST_PAT_FULL,
427 VMX_VMCS64_GUEST_PAT_HIGH,
428 VMX_VMCS64_GUEST_EFER_FULL,
429 VMX_VMCS64_GUEST_EFER_HIGH,
430 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
431 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
432 VMX_VMCS64_GUEST_PDPTE0_FULL,
433 VMX_VMCS64_GUEST_PDPTE0_HIGH,
434 VMX_VMCS64_GUEST_PDPTE1_FULL,
435 VMX_VMCS64_GUEST_PDPTE1_HIGH,
436 VMX_VMCS64_GUEST_PDPTE2_FULL,
437 VMX_VMCS64_GUEST_PDPTE2_HIGH,
438 VMX_VMCS64_GUEST_PDPTE3_FULL,
439 VMX_VMCS64_GUEST_PDPTE3_HIGH,
440 VMX_VMCS64_GUEST_BNDCFGS_FULL,
441 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
442 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
443 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
444 VMX_VMCS64_GUEST_PKRS_FULL,
445 VMX_VMCS64_GUEST_PKRS_HIGH,
446
447 /* 64-bit host-state fields. */
448 VMX_VMCS64_HOST_PAT_FULL,
449 VMX_VMCS64_HOST_PAT_HIGH,
450 VMX_VMCS64_HOST_EFER_FULL,
451 VMX_VMCS64_HOST_EFER_HIGH,
452 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
453 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
454 VMX_VMCS64_HOST_PKRS_FULL,
455 VMX_VMCS64_HOST_PKRS_HIGH,
456
457 /* 32-bit control fields. */
458 VMX_VMCS32_CTRL_PIN_EXEC,
459 VMX_VMCS32_CTRL_PROC_EXEC,
460 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
461 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
462 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
463 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
464 VMX_VMCS32_CTRL_EXIT,
465 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
466 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
467 VMX_VMCS32_CTRL_ENTRY,
468 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
469 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
470 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
471 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
472 VMX_VMCS32_CTRL_TPR_THRESHOLD,
473 VMX_VMCS32_CTRL_PROC_EXEC2,
474 VMX_VMCS32_CTRL_PLE_GAP,
475 VMX_VMCS32_CTRL_PLE_WINDOW,
476
477 /* 32-bit read-only fields. */
478 VMX_VMCS32_RO_VM_INSTR_ERROR,
479 VMX_VMCS32_RO_EXIT_REASON,
480 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
481 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
482 VMX_VMCS32_RO_IDT_VECTORING_INFO,
483 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
484 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
485 VMX_VMCS32_RO_EXIT_INSTR_INFO,
486
487 /* 32-bit guest-state fields. */
488 VMX_VMCS32_GUEST_ES_LIMIT,
489 VMX_VMCS32_GUEST_CS_LIMIT,
490 VMX_VMCS32_GUEST_SS_LIMIT,
491 VMX_VMCS32_GUEST_DS_LIMIT,
492 VMX_VMCS32_GUEST_FS_LIMIT,
493 VMX_VMCS32_GUEST_GS_LIMIT,
494 VMX_VMCS32_GUEST_LDTR_LIMIT,
495 VMX_VMCS32_GUEST_TR_LIMIT,
496 VMX_VMCS32_GUEST_GDTR_LIMIT,
497 VMX_VMCS32_GUEST_IDTR_LIMIT,
498 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
499 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
500 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
501 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
502 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
503 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
504 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
505 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
506 VMX_VMCS32_GUEST_INT_STATE,
507 VMX_VMCS32_GUEST_ACTIVITY_STATE,
508 VMX_VMCS32_GUEST_SMBASE,
509 VMX_VMCS32_GUEST_SYSENTER_CS,
510 VMX_VMCS32_PREEMPT_TIMER_VALUE,
511
512 /* 32-bit host-state fields. */
513 VMX_VMCS32_HOST_SYSENTER_CS,
514
515 /* Natural-width control fields. */
516 VMX_VMCS_CTRL_CR0_MASK,
517 VMX_VMCS_CTRL_CR4_MASK,
518 VMX_VMCS_CTRL_CR0_READ_SHADOW,
519 VMX_VMCS_CTRL_CR4_READ_SHADOW,
520 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
521 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
522 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
523 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
524
525 /* Natural-width read-only data fields. */
526 VMX_VMCS_RO_EXIT_QUALIFICATION,
527 VMX_VMCS_RO_IO_RCX,
528 VMX_VMCS_RO_IO_RSI,
529 VMX_VMCS_RO_IO_RDI,
530 VMX_VMCS_RO_IO_RIP,
531 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
532
533 /* Natural-width guest-state fields. */
534 VMX_VMCS_GUEST_CR0,
535 VMX_VMCS_GUEST_CR3,
536 VMX_VMCS_GUEST_CR4,
537 VMX_VMCS_GUEST_ES_BASE,
538 VMX_VMCS_GUEST_CS_BASE,
539 VMX_VMCS_GUEST_SS_BASE,
540 VMX_VMCS_GUEST_DS_BASE,
541 VMX_VMCS_GUEST_FS_BASE,
542 VMX_VMCS_GUEST_GS_BASE,
543 VMX_VMCS_GUEST_LDTR_BASE,
544 VMX_VMCS_GUEST_TR_BASE,
545 VMX_VMCS_GUEST_GDTR_BASE,
546 VMX_VMCS_GUEST_IDTR_BASE,
547 VMX_VMCS_GUEST_DR7,
548 VMX_VMCS_GUEST_RSP,
549 VMX_VMCS_GUEST_RIP,
550 VMX_VMCS_GUEST_RFLAGS,
551 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
552 VMX_VMCS_GUEST_SYSENTER_ESP,
553 VMX_VMCS_GUEST_SYSENTER_EIP,
554 VMX_VMCS_GUEST_S_CET,
555 VMX_VMCS_GUEST_SSP,
556 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
557
558 /* Natural-width host-state fields. */
559 VMX_VMCS_HOST_CR0,
560 VMX_VMCS_HOST_CR3,
561 VMX_VMCS_HOST_CR4,
562 VMX_VMCS_HOST_FS_BASE,
563 VMX_VMCS_HOST_GS_BASE,
564 VMX_VMCS_HOST_TR_BASE,
565 VMX_VMCS_HOST_GDTR_BASE,
566 VMX_VMCS_HOST_IDTR_BASE,
567 VMX_VMCS_HOST_SYSENTER_ESP,
568 VMX_VMCS_HOST_SYSENTER_EIP,
569 VMX_VMCS_HOST_RSP,
570 VMX_VMCS_HOST_RIP,
571 VMX_VMCS_HOST_S_CET,
572 VMX_VMCS_HOST_SSP,
573 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
574};
575#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
576
577#ifdef HMVMX_USE_FUNCTION_TABLE
578/**
579 * VMX_EXIT dispatch table.
580 */
581static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
582{
583 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
584 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
585 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
586 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
587 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
588 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
589 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
590 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
591 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
592 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
593 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
594 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
595 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
596 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
597 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
598 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
599 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
600 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
601 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
602#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
603 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
604 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
605 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
606 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
607 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
608 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
609 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
610 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
611 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
612#else
613 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
614 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
615 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
616 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
617 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
618 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
619 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
620 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
621 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
622#endif
623 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
624 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
625 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
626 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
627 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
628 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
629 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
630 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
631 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
632 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
633 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
634 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
635 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
636 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
637 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
638 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
639 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
640 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
641 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
642 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
643 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
644 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
645#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
646 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
647#else
648 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
649#endif
650 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
651 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
652#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
653 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
654#else
655 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
656#endif
657 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
658 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
659 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
660 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
661 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
662 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
663 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
664 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
665 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
666 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
667 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
668 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
669 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
670 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
671 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
672 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
673};
674#endif /* HMVMX_USE_FUNCTION_TABLE */
675
676#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
677static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
678{
679 /* 0 */ "(Not Used)",
680 /* 1 */ "VMCALL executed in VMX root operation.",
681 /* 2 */ "VMCLEAR with invalid physical address.",
682 /* 3 */ "VMCLEAR with VMXON pointer.",
683 /* 4 */ "VMLAUNCH with non-clear VMCS.",
684 /* 5 */ "VMRESUME with non-launched VMCS.",
685 /* 6 */ "VMRESUME after VMXOFF.",
686 /* 7 */ "VM-entry with invalid control fields.",
687 /* 8 */ "VM-entry with invalid host state fields.",
688 /* 9 */ "VMPTRLD with invalid physical address.",
689 /* 10 */ "VMPTRLD with VMXON pointer.",
690 /* 11 */ "VMPTRLD with incorrect revision identifier.",
691 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
692 /* 13 */ "VMWRITE to read-only VMCS component.",
693 /* 14 */ "(Not Used)",
694 /* 15 */ "VMXON executed in VMX root operation.",
695 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
696 /* 17 */ "VM-entry with non-launched executive VMCS.",
697 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
698 /* 19 */ "VMCALL with non-clear VMCS.",
699 /* 20 */ "VMCALL with invalid VM-exit control fields.",
700 /* 21 */ "(Not Used)",
701 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
702 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
703 /* 24 */ "VMCALL with invalid SMM-monitor features.",
704 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
705 /* 26 */ "VM-entry with events blocked by MOV SS.",
706 /* 27 */ "(Not Used)",
707 /* 28 */ "Invalid operand to INVEPT/INVVPID."
708};
709#endif /* VBOX_STRICT && LOG_ENABLED */
710
711
712/**
713 * Gets the CR0 guest/host mask.
714 *
715 * These bits typically do not change through the lifetime of a VM. Any bit set in
716 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
717 * by the guest.
718 *
719 * @returns The CR0 guest/host mask.
720 * @param pVCpu The cross context virtual CPU structure.
721 */
722static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
723{
724 /*
725 * Modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW) and
726 * to CR0 bits that we require for shadow paging (PG) by the guest must cause VM-exits.
727 *
728 * Furthermore, modifications to any bits that are reserved/unspecified currently
729 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
730 * when future CPUs specify and use currently reserved/unspecified bits.
731 */
732 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
733 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
734 * and @bugref{6944}. */
735 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
736 return ( X86_CR0_PE
737 | X86_CR0_NE
738 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
739 | X86_CR0_PG
740 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
741}
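/* Illustrative usage (a sketch; the exact call site is assumed): the returned mask is
 * committed as the CR0 guest/host mask when exporting guest CR0:
 *     uint64_t const fCr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
 *     int const rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);
 *     AssertRC(rc);
 */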
742
743
744/**
745 * Gets the CR4 guest/host mask.
746 *
747 * These bits typically do not change through the lifetime of a VM. Any bit set in
748 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
749 * by the guest.
750 *
751 * @returns The CR4 guest/host mask.
752 * @param pVCpu The cross context virtual CPU structure.
753 */
754static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
755{
756 /*
757 * We construct a mask of all CR4 bits that the guest can modify without causing
758 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
759 * a VM-exit when the guest attempts to modify them when executing using
760 * hardware-assisted VMX.
761 *
762 * When a feature is not exposed to the guest (and may be present on the host),
763 * we want to intercept guest modifications to the bit so we can emulate proper
764 * behavior (e.g., #GP).
765 *
766 * Furthermore, only modifications to those bits that don't require immediate
767 * emulation are allowed. For example, PCIDE is excluded because the behavior
768 * depends on CR3, which might not always be the guest value while executing
769 * using hardware-assisted VMX.
770 */
771 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
772 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
773#ifdef IN_NEM_DARWIN
774 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
775#endif
776 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
777
778 /*
779 * Paranoia.
780 * Ensure features exposed to the guest are present on the host.
781 */
782 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
783#ifdef IN_NEM_DARWIN
784 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
785#endif
786 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
787
788 uint64_t const fGstMask = X86_CR4_PVI
789 | X86_CR4_TSD
790 | X86_CR4_DE
791 | X86_CR4_MCE
792 | X86_CR4_PCE
793 | X86_CR4_OSXMMEEXCPT
794 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
795#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
796 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
797 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
798#endif
799 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
800 return ~fGstMask;
801}
802
803
804/**
805 * Adds one or more exceptions to the exception bitmap and commits it to the current
806 * VMCS.
807 *
808 * @param pVCpu The cross context virtual CPU structure.
809 * @param pVmxTransient The VMX-transient structure.
810 * @param uXcptMask The exception(s) to add.
811 */
812static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
813{
814 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
815 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
816 if ((uXcptBitmap & uXcptMask) != uXcptMask)
817 {
818 uXcptBitmap |= uXcptMask;
819 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
820 AssertRC(rc);
821 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
822 }
823}
824
825
826/**
827 * Adds an exception to the exception bitmap and commits it to the current VMCS.
828 *
829 * @param pVCpu The cross context virtual CPU structure.
830 * @param pVmxTransient The VMX-transient structure.
831 * @param uXcpt The exception to add.
832 */
833static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
834{
835 Assert(uXcpt <= X86_XCPT_LAST);
836 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
837}
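/* Illustrative usage (a minimal sketch, assuming a caller that wants \#GP VM-exits):
 *     vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 */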
838
839
840/**
841 * Removes one or more exceptions from the exception bitmap and commits it to the
842 * current VMCS.
843 *
844 * This takes care of not removing the exception intercept if a nested-guest
845 * requires the exception to be intercepted.
846 *
847 * @returns VBox status code.
848 * @param pVCpu The cross context virtual CPU structure.
849 * @param pVmxTransient The VMX-transient structure.
850 * @param uXcptMask The exception(s) to remove.
851 */
852static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
853{
854 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
855 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
856 if (uXcptBitmap & uXcptMask)
857 {
858#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
859 if (!pVmxTransient->fIsNestedGuest)
860 { /* likely */ }
861 else
862 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
863#endif
864#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
865 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
866 | RT_BIT(X86_XCPT_DE)
867 | RT_BIT(X86_XCPT_NM)
868 | RT_BIT(X86_XCPT_TS)
869 | RT_BIT(X86_XCPT_UD)
870 | RT_BIT(X86_XCPT_NP)
871 | RT_BIT(X86_XCPT_SS)
872 | RT_BIT(X86_XCPT_GP)
873 | RT_BIT(X86_XCPT_PF)
874 | RT_BIT(X86_XCPT_MF));
875#elif defined(HMVMX_ALWAYS_TRAP_PF)
876 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
877#endif
878 if (uXcptMask)
879 {
880 /* Validate we are not removing any essential exception intercepts. */
881#ifndef IN_NEM_DARWIN
882 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
883#else
884 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
885#endif
886 NOREF(pVCpu);
887 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
888 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
889
890 /* Remove it from the exception bitmap. */
891 uXcptBitmap &= ~uXcptMask;
892
893 /* Commit and update the cache if necessary. */
894 if (pVmcsInfo->u32XcptBitmap != uXcptBitmap)
895 {
896 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
897 AssertRC(rc);
898 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
899 }
900 }
901 }
902 return VINF_SUCCESS;
903}
904
905
906/**
907 * Removes an exception from the exception bitmap and commits it to the current
908 * VMCS.
909 *
910 * @returns VBox status code.
911 * @param pVCpu The cross context virtual CPU structure.
912 * @param pVmxTransient The VMX-transient structure.
913 * @param uXcpt The exception to remove.
914 */
915static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
916{
917 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
918}
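/* Illustrative usage (a minimal sketch, assuming the intercept is no longer needed and is
 * not an essential one like \#DB, \#AC or \#PF without nested paging):
 *     int const rc = vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_MF);
 *     AssertRC(rc);
 */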
919
920#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
921
922/**
923 * Loads the shadow VMCS specified by the VMCS info. object.
924 *
925 * @returns VBox status code.
926 * @param pVmcsInfo The VMCS info. object.
927 *
928 * @remarks Can be called with interrupts disabled.
929 */
930static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
931{
932 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
933 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
934
935 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
936 if (RT_SUCCESS(rc))
937 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
938 return rc;
939}
940
941
942/**
943 * Clears the shadow VMCS specified by the VMCS info. object.
944 *
945 * @returns VBox status code.
946 * @param pVmcsInfo The VMCS info. object.
947 *
948 * @remarks Can be called with interrupts disabled.
949 */
950static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
951{
952 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
953 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
954
955 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
956 if (RT_SUCCESS(rc))
957 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
958 return rc;
959}
960
961
962/**
963 * Switches from and to the specified VMCSes.
964 *
965 * @returns VBox status code.
966 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
967 * @param pVmcsInfoTo The VMCS info. object we are switching to.
968 *
969 * @remarks Called with interrupts disabled.
970 */
971static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
972{
973 /*
974 * Clear the VMCS we are switching out if it has not already been cleared.
975 * This will sync any CPU internal data back to the VMCS.
976 */
977 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
978 {
979 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
980 if (RT_SUCCESS(rc))
981 {
982 /*
983 * The shadow VMCS, if any, would not be active at this point since we
984 * would have cleared it while importing the virtual hardware-virtualization
985 * state as part the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
986 * clear the shadow VMCS here, just assert for safety.
987 */
988 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
989 }
990 else
991 return rc;
992 }
993
994 /*
995 * Clear the VMCS we are switching to if it has not already been cleared.
996 * This will initialize the VMCS launch state to "clear" required for loading it.
997 *
998 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
999 */
1000 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1001 {
1002 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1003 if (RT_SUCCESS(rc))
1004 { /* likely */ }
1005 else
1006 return rc;
1007 }
1008
1009 /*
1010 * Finally, load the VMCS we are switching to.
1011 */
1012 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1013}
1014
1015
1016/**
1017 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1018 * caller.
1019 *
1020 * @returns VBox status code.
1021 * @param pVCpu The cross context virtual CPU structure.
1022 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1023 * true) or guest VMCS (pass false).
1024 */
1025static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1026{
1027 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1028 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1029
1030 PVMXVMCSINFO pVmcsInfoFrom;
1031 PVMXVMCSINFO pVmcsInfoTo;
1032 if (fSwitchToNstGstVmcs)
1033 {
1034 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1035 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1036 }
1037 else
1038 {
1039 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1040 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1041 }
1042
1043 /*
1044 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1045 * preemption hook code path acquires the current VMCS.
1046 */
1047 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1048
1049 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1050 if (RT_SUCCESS(rc))
1051 {
1052 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1053 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1054
1055 /*
1056 * If we are switching to a VMCS that was executed on a different host CPU or was
1057 * never executed before, flag that we need to export the host state before executing
1058 * guest/nested-guest code using hardware-assisted VMX.
1059 *
1060 * This could probably be done in a preemptible context since the preemption hook
1061 * will flag the necessary change in host context. However, since preemption is
1062 * already disabled and to avoid making assumptions about host specific code in
1063 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1064 * disabled.
1065 */
1066 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1067 { /* likely */ }
1068 else
1069 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1070
1071 ASMSetFlags(fEFlags);
1072
1073 /*
1074 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1075 * flag that we need to update the host MSR values there. Even if we decide in the
1076 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1077 * if its content differs, we would have to update the host MSRs anyway.
1078 */
1079 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1080 }
1081 else
1082 ASMSetFlags(fEFlags);
1083 return rc;
1084}
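/* Illustrative usage (a sketch; the actual call sites are assumed): pass true to switch to
 * the nested-guest VMCS before running the nested-guest, false to switch back:
 *     int const rc = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, true);
 *     AssertRCReturn(rc, rc);
 */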
1085
1086#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1087#ifdef VBOX_STRICT
1088
1089/**
1090 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1091 * transient structure.
1092 *
1093 * @param pVCpu The cross context virtual CPU structure.
1094 * @param pVmxTransient The VMX-transient structure.
1095 */
1096DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1097{
1098 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1099 AssertRC(rc);
1100}
1101
1102
1103/**
1104 * Reads the VM-entry exception error code field from the VMCS into
1105 * the VMX transient structure.
1106 *
1107 * @param pVCpu The cross context virtual CPU structure.
1108 * @param pVmxTransient The VMX-transient structure.
1109 */
1110DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1111{
1112 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1113 AssertRC(rc);
1114}
1115
1116
1117/**
1118 * Reads the VM-entry instruction length field from the VMCS into
1119 * the VMX transient structure.
1120 *
1121 * @param pVCpu The cross context virtual CPU structure.
1122 * @param pVmxTransient The VMX-transient structure.
1123 */
1124DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1125{
1126 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1127 AssertRC(rc);
1128}
1129
1130#endif /* VBOX_STRICT */
1131
1132
1133/**
1134 * Reads VMCS fields into the VMXTRANSIENT structure, slow path version.
1135 *
1136 * Don't call directly unless it's likely that some or all of the fields
1137 * given in @a a_fReadMask have already been read.
1138 *
1139 * @tparam a_fReadMask The fields to read.
1140 * @param pVCpu The cross context virtual CPU structure.
1141 * @param pVmxTransient The VMX-transient structure.
1142 */
1143template<uint32_t const a_fReadMask>
1144static void vmxHCReadToTransientSlow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1145{
1146 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1147 | HMVMX_READ_EXIT_INSTR_LEN
1148 | HMVMX_READ_EXIT_INSTR_INFO
1149 | HMVMX_READ_IDT_VECTORING_INFO
1150 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1151 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1152 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1153 | HMVMX_READ_GUEST_LINEAR_ADDR
1154 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1155 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1156 )) == 0);
1157
1158 if ((pVmxTransient->fVmcsFieldsRead & a_fReadMask) != a_fReadMask)
1159 {
1160 uint32_t const fVmcsFieldsRead = pVmxTransient->fVmcsFieldsRead;
1161
1162 if ( (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1163 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1164 {
1165 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1166 AssertRC(rc);
1167 }
1168 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1169 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1170 {
1171 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1172 AssertRC(rc);
1173 }
1174 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1175 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1176 {
1177 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1178 AssertRC(rc);
1179 }
1180 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1181 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1182 {
1183 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1184 AssertRC(rc);
1185 }
1186 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1187 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1188 {
1189 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1190 AssertRC(rc);
1191 }
1192 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1193 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1194 {
1195 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1196 AssertRC(rc);
1197 }
1198 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1199 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1200 {
1201 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1202 AssertRC(rc);
1203 }
1204 if ( (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1205 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1206 {
1207 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1208 AssertRC(rc);
1209 }
1210 if ( (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1211 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1212 {
1213 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1214 AssertRC(rc);
1215 }
1216 if ( (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1217 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1218 {
1219 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1220 AssertRC(rc);
1221 }
1222
1223 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1224 }
1225}
1226
1227
1228/**
1229 * Reads VMCS fields into the VMXTRANSIENT structure.
1230 *
1231 * This optimizes for the case where none of @a a_fReadMask has been read yet,
1232 * generating an optimized read sequence without any conditionals in between
1233 * in non-strict builds.
1234 *
1235 * @tparam a_fReadMask The fields to read. One or more of the
1236 * HMVMX_READ_XXX fields ORed together.
1237 * @param pVCpu The cross context virtual CPU structure.
1238 * @param pVmxTransient The VMX-transient structure.
1239 */
1240template<uint32_t const a_fReadMask>
1241DECLINLINE(void) vmxHCReadToTransient(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1242{
1243 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1244 | HMVMX_READ_EXIT_INSTR_LEN
1245 | HMVMX_READ_EXIT_INSTR_INFO
1246 | HMVMX_READ_IDT_VECTORING_INFO
1247 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1248 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1249 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1250 | HMVMX_READ_GUEST_LINEAR_ADDR
1251 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1252 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1253 )) == 0);
1254
1255 if (RT_LIKELY(!(pVmxTransient->fVmcsFieldsRead & a_fReadMask)))
1256 {
1257 if (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1258 {
1259 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1260 AssertRC(rc);
1261 }
1262 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1263 {
1264 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1265 AssertRC(rc);
1266 }
1267 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1268 {
1269 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1270 AssertRC(rc);
1271 }
1272 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1273 {
1274 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1275 AssertRC(rc);
1276 }
1277 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1278 {
1279 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1280 AssertRC(rc);
1281 }
1282 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1283 {
1284 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1285 AssertRC(rc);
1286 }
1287 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1288 {
1289 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1290 AssertRC(rc);
1291 }
1292 if (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1293 {
1294 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1295 AssertRC(rc);
1296 }
1297 if (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1298 {
1299 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1300 AssertRC(rc);
1301 }
1302 if (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1303 {
1304 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1305 AssertRC(rc);
1306 }
1307
1308 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1309 }
1310 else
1311 {
1312 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatReadToTransientFallback);
1313 Log11Func(("a_fReadMask=%#x fVmcsFieldsRead=%#x => %#x - Taking inefficient code path!\n",
1314 a_fReadMask, pVmxTransient->fVmcsFieldsRead, a_fReadMask & pVmxTransient->fVmcsFieldsRead));
1315 vmxHCReadToTransientSlow<a_fReadMask>(pVCpu, pVmxTransient);
1316 }
1317}
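/* Illustrative usage (a minimal sketch, assuming an exit handler that needs the exit
 * qualification and instruction length):
 *     vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
 *                          | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
 */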
1318
1319
1320#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1321/**
1322 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1323 *
1324 * @param pVCpu The cross context virtual CPU structure.
1325 * @param pVmxTransient The VMX-transient structure.
1326 */
1327static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1328{
1329 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1330 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1331 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1332 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1333 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1334 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1335 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1336 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1337 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1338 AssertRC(rc);
1339 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1340 | HMVMX_READ_EXIT_INSTR_LEN
1341 | HMVMX_READ_EXIT_INSTR_INFO
1342 | HMVMX_READ_IDT_VECTORING_INFO
1343 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1344 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1345 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1346 | HMVMX_READ_GUEST_LINEAR_ADDR
1347 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1348}
1349#endif
1350
1351/**
1352 * Verifies that our cached values of the VMCS fields are all consistent with
1353 * what's actually present in the VMCS.
1354 *
1355 * @returns VBox status code.
1356 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1357 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1358 * VMCS content. HMCPU error-field is
1359 * updated, see VMX_VCI_XXX.
1360 * @param pVCpu The cross context virtual CPU structure.
1361 * @param pVmcsInfo The VMCS info. object.
1362 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1363 */
1364static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1365{
1366 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1367
1368 uint32_t u32Val;
1369 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1370 AssertRC(rc);
1371 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1372 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1373 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1374 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1375
1376 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1377 AssertRC(rc);
1378 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1379 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1380 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1381 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1382
1383 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1384 AssertRC(rc);
1385 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1386 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1387 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1388 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1389
1390 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1391 AssertRC(rc);
1392 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1393 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1394 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1395 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1396
1397 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1398 {
1399 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1400 AssertRC(rc);
1401 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1402 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1403 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1404 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1405 }
1406
1407 uint64_t u64Val;
1408 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1409 {
1410 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1411 AssertRC(rc);
1412 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1413 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1414 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1415 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1416 }
1417
1418 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1419 AssertRC(rc);
1420 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1421 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1422 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1423 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1424
1425 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1426 AssertRC(rc);
1427 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1428 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1429 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1430 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1431
1432 NOREF(pcszVmcs);
1433 return VINF_SUCCESS;
1434}
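/*
 * Illustrative call site (assumed, not taken from this file): strict builds typically run this
 * check right before resuming guest execution, e.g.
 *     int const rcCache = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, fIsNstGstVmcs);
 *     AssertRCReturn(rcCache, rcCache);
 * where fIsNstGstVmcs mirrors whether the current VMCS belongs to a nested-guest.
 */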
1435
1436
1437/**
1438 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1439 * VMCS.
1440 *
1441 * This is typically required when the guest changes paging mode.
1442 *
1443 * @returns VBox status code.
1444 * @param pVCpu The cross context virtual CPU structure.
1445 * @param pVmxTransient The VMX-transient structure.
1446 *
1447 * @remarks Requires EFER.
1448 * @remarks No-long-jump zone!!!
1449 */
1450static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1451{
1452 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1453 {
1454 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1455 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1456
1457 /*
1458 * VM-entry controls.
1459 */
1460 {
1461 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1462 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1463
1464 /*
1465 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1466 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1467 *
1468 * For nested-guests, this is a mandatory VM-entry control. It's also
1469 * required because we do not want to leak host bits to the nested-guest.
1470 */
1471 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1472
1473 /*
1474 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1475 *
1476          * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1477 * required to get the nested-guest working with hardware-assisted VMX execution.
1478 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1479 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1480 * here rather than while merging the guest VMCS controls.
1481 */
1482 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1483 {
1484 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1485 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1486 }
1487 else
1488 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1489
1490 /*
1491      * If the CPU supports the newer VMCS controls for managing guest/host EFER, use them.
1492 *
1493      * For nested-guests, we use the "load IA32_EFER" control if the hardware supports it,
1494 * regardless of whether the nested-guest VMCS specifies it because we are free to
1495 * load whatever MSRs we require and we do not need to modify the guest visible copy
1496 * of the VM-entry MSR load area.
1497 */
1498 if ( g_fHmVmxSupportsVmcsEfer
1499#ifndef IN_NEM_DARWIN
1500 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1501#endif
1502 )
1503 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1504 else
1505 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1506
1507 /*
1508 * The following should -not- be set (since we're not in SMM mode):
1509 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1510 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1511 */
1512
1513 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1514 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1515
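            /*
             * Illustration (hypothetical MSR values): allowed0 bits must be 1 and bits clear
             * in allowed1 must be 0. E.g. with allowed0 = 0x000011ff and allowed1 = 0x00ffffff,
             * fVal starts out as 0x000011ff; setting any bit above bit 23 would make
             * (fVal & fZap) != fVal and we would fail below with VMX_UFC_CTRL_ENTRY.
             */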
1516 if ((fVal & fZap) == fVal)
1517 { /* likely */ }
1518 else
1519 {
1520 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1521 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1522 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1523 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1524 }
1525
1526 /* Commit it to the VMCS. */
1527 if (pVmcsInfo->u32EntryCtls != fVal)
1528 {
1529 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1530 AssertRC(rc);
1531 pVmcsInfo->u32EntryCtls = fVal;
1532 }
1533 }
1534
1535 /*
1536 * VM-exit controls.
1537 */
1538 {
1539 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1540 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1541
1542 /*
1543 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1544 * supported the 1-setting of this bit.
1545 *
1546          * For nested-guests, we set the "save debug controls" control since the converse
1547          * "load debug controls" control is mandatory for nested-guests anyway.
1548 */
1549 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1550
1551 /*
1552 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1553 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1554 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1555 * vmxHCExportHostMsrs().
1556 *
1557 * For nested-guests, we always set this bit as we do not support 32-bit
1558 * hosts.
1559 */
1560 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1561
1562#ifndef IN_NEM_DARWIN
1563 /*
1564 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1565      * If the VMCS EFER MSR fields are supported by the hardware, we use them.
1566 * For nested-guests, we should use the "save IA32_EFER" control if we also
1567 * used the "load IA32_EFER" control while exporting VM-entry controls.
1568 */
1569 if ( g_fHmVmxSupportsVmcsEfer
1570 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1571 {
1572 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1573 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1574 }
1575#endif
1576
1577 /*
1578 * Enable saving of the VMX-preemption timer value on VM-exit.
1579 * For nested-guests, currently not exposed/used.
1580 */
1581 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1582 * the timer value. */
1583 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1584 {
1585 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1586 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1587 }
1588
1589 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1590 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1591
1592 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1593 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1594 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1595
1596 if ((fVal & fZap) == fVal)
1597 { /* likely */ }
1598 else
1599 {
1600 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1601 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1602 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1603 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1604 }
1605
1606 /* Commit it to the VMCS. */
1607 if (pVmcsInfo->u32ExitCtls != fVal)
1608 {
1609 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1610 AssertRC(rc);
1611 pVmcsInfo->u32ExitCtls = fVal;
1612 }
1613 }
1614
1615 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1616 }
1617 return VINF_SUCCESS;
1618}
1619
1620
1621/**
1622 * Sets the TPR threshold in the VMCS.
1623 *
1624 * @param pVCpu The cross context virtual CPU structure.
1625 * @param pVmcsInfo The VMCS info. object.
1626 * @param u32TprThreshold The TPR threshold (task-priority class only).
1627 */
1628DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1629{
1630 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1631 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1632 RT_NOREF(pVmcsInfo);
1633 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1634 AssertRC(rc);
1635}
1636
1637
1638/**
1639 * Exports the guest APIC TPR state into the VMCS.
1640 *
1641 * @param pVCpu The cross context virtual CPU structure.
1642 * @param pVmxTransient The VMX-transient structure.
1643 *
1644 * @remarks No-long-jump zone!!!
1645 */
1646static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1647{
1648 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1649 {
1650 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1651
1652 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1653 if (!pVmxTransient->fIsNestedGuest)
1654 {
1655 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1656 && APICIsEnabled(pVCpu))
1657 {
1658 /*
1659 * Setup TPR shadowing.
1660 */
1661 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1662 {
1663 bool fPendingIntr = false;
1664 uint8_t u8Tpr = 0;
1665 uint8_t u8PendingIntr = 0;
1666 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1667 AssertRC(rc);
1668
1669 /*
1670 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1671 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1672 * priority of the pending interrupt so we can deliver the interrupt. If there
1673 * are no interrupts pending, set threshold to 0 to not cause any
1674 * TPR-below-threshold VM-exits.
1675 */
1676 uint32_t u32TprThreshold = 0;
1677 if (fPendingIntr)
1678 {
1679 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1680 (which is the Task-Priority Class). */
1681 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1682 const uint8_t u8TprPriority = u8Tpr >> 4;
1683 if (u8PendingPriority <= u8TprPriority)
1684 u32TprThreshold = u8PendingPriority;
1685 }
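                    /*
                     * Worked example (hypothetical values): a pending vector of 0x51 gives a
                     * pending priority class of 5; with the guest TPR at 0x80 (class 8) the
                     * threshold becomes 5, so lowering the TPR below 0x50 causes the
                     * TPR-below-threshold VM-exit that lets us deliver the interrupt.
                     */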
1686
1687 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1688 }
1689 }
1690 }
1691 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1692 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1693 }
1694}
1695
1696
1697/**
1698 * Gets the guest interruptibility-state and updates related force-flags.
1699 *
1700 * @returns Guest's interruptibility-state.
1701 * @param pVCpu The cross context virtual CPU structure.
1702 *
1703 * @remarks No-long-jump zone!!!
1704 */
1705static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1706{
1707 uint32_t fIntrState;
1708
1709 /*
1710 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1711 */
1712 if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
1713 fIntrState = 0;
1714 else
1715 {
1716 /* If inhibition is active, RIP should've been imported from the VMCS already. */
1717 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1718
1719 if (CPUMIsInInterruptShadowAfterSs(&pVCpu->cpum.GstCtx))
1720 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1721 else
1722 {
1723 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1724
1725 /* Block-by-STI must not be set when interrupts are disabled. */
1726 AssertStmt(pVCpu->cpum.GstCtx.eflags.Bits.u1IF, fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
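            /*
             * Illustration (assumed scenario): immediately after the guest executes STI with
             * EFLAGS.IF previously clear, the interrupt shadow covers the following
             * instruction, so block-by-STI is reported here and we hold off injecting
             * external interrupts until the shadow expires.
             */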
1727 }
1728 }
1729
1730 /*
1731 * Check if we should inhibit NMI delivery.
1732 */
1733 if (!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1734 { /* likely */ }
1735 else
1736 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1737
1738 /*
1739 * Validate.
1740 */
1741     /* We don't support block-by-SMI yet. */
1742 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1743
1744 return fIntrState;
1745}
1746
1747
1748/**
1749 * Exports the exception intercepts required for guest execution in the VMCS.
1750 *
1751 * @param pVCpu The cross context virtual CPU structure.
1752 * @param pVmxTransient The VMX-transient structure.
1753 *
1754 * @remarks No-long-jump zone!!!
1755 */
1756static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1757{
1758 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1759 {
1760 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1761 if ( !pVmxTransient->fIsNestedGuest
1762 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1763 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1764 else
1765 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1766
1767 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1768 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1769 }
1770}
1771
1772
1773/**
1774 * Exports the guest's RIP into the guest-state area in the VMCS.
1775 *
1776 * @param pVCpu The cross context virtual CPU structure.
1777 *
1778 * @remarks No-long-jump zone!!!
1779 */
1780static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1781{
1782 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1783 {
1784 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1785
1786 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1787 AssertRC(rc);
1788
1789 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1790 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1791 }
1792}
1793
1794
1795/**
1796 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1797 *
1798 * @param pVCpu The cross context virtual CPU structure.
1799 * @param pVmxTransient The VMX-transient structure.
1800 *
1801 * @remarks No-long-jump zone!!!
1802 */
1803static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1804{
1805 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1806 {
1807 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1808
1809 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits
1810            of RFLAGS are reserved (MBZ). We use bits 63:24 for internal purposes, so there is no
1811            need to assert this; the CPUMX86EFLAGS/CPUMX86RFLAGS union masks these off for us.
1812 Use 32-bit VMWRITE. */
1813 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
1814 Assert((fEFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
1815 AssertMsg(!(fEFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK)), ("%#x\n", fEFlags));
1816
1817#ifndef IN_NEM_DARWIN
1818 /*
1819 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1820 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1821 * can run the real-mode guest code under Virtual 8086 mode.
1822 */
1823 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1824 if (pVmcsInfo->RealMode.fRealOnV86Active)
1825 {
1826 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1827 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1828 Assert(!pVmxTransient->fIsNestedGuest);
1829 pVmcsInfo->RealMode.Eflags.u32 = fEFlags; /* Save the original eflags of the real-mode guest. */
1830 fEFlags |= X86_EFL_VM; /* Set the Virtual 8086 mode bit. */
1831 fEFlags &= ~X86_EFL_IOPL; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1832 }
1833#else
1834 RT_NOREF(pVmxTransient);
1835#endif
1836
1837 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags);
1838 AssertRC(rc);
1839
1840 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1841 Log4Func(("eflags=%#RX32\n", fEFlags));
1842 }
1843}
1844
1845
1846#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1847/**
1848 * Copies the nested-guest VMCS to the shadow VMCS.
1849 *
1850 * @returns VBox status code.
1851 * @param pVCpu The cross context virtual CPU structure.
1852 * @param pVmcsInfo The VMCS info. object.
1853 *
1854 * @remarks No-long-jump zone!!!
1855 */
1856static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1857{
1858 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1859 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1860
1861 /*
1862 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1863 * current VMCS, as we may try saving guest lazy MSRs.
1864 *
1865 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1866 * calling the import VMCS code which is currently performing the guest MSR reads
1867 * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
1868 * and the rest of the VMX leave session machinery.
1869 */
1870 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1871
1872 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1873 if (RT_SUCCESS(rc))
1874 {
1875 /*
1876 * Copy all guest read/write VMCS fields.
1877 *
1878 * We don't check for VMWRITE failures here for performance reasons and
1879 * because they are not expected to fail, barring irrecoverable conditions
1880 * like hardware errors.
1881 */
1882 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1883 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1884 {
1885 uint64_t u64Val;
1886 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1887 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1888 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1889 }
1890
1891 /*
1892 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1893 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1894 */
1895 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1896 {
1897 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1898 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1899 {
1900 uint64_t u64Val;
1901 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1902 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1903 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1904 }
1905 }
1906
1907 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1908 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1909 }
1910
1911 ASMSetFlags(fEFlags);
1912 return rc;
1913}
1914
1915
1916/**
1917 * Copies the shadow VMCS to the nested-guest VMCS.
1918 *
1919 * @returns VBox status code.
1920 * @param pVCpu The cross context virtual CPU structure.
1921 * @param pVmcsInfo The VMCS info. object.
1922 *
1923 * @remarks Called with interrupts disabled.
1924 */
1925static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1926{
1927 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1928 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1929 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1930
1931 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1932 if (RT_SUCCESS(rc))
1933 {
1934 /*
1935 * Copy guest read/write fields from the shadow VMCS.
1936 * Guest read-only fields cannot be modified, so no need to copy them.
1937 *
1938 * We don't check for VMREAD failures here for performance reasons and
1939 * because they are not expected to fail, barring irrecoverable conditions
1940 * like hardware errors.
1941 */
1942 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1943 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1944 {
1945 uint64_t u64Val;
1946 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1947 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1948 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1949 }
1950
1951 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1952 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1953 }
1954 return rc;
1955}
1956
1957
1958/**
1959 * Enables VMCS shadowing for the given VMCS info. object.
1960 *
1961 * @param pVCpu The cross context virtual CPU structure.
1962 * @param pVmcsInfo The VMCS info. object.
1963 *
1964 * @remarks No-long-jump zone!!!
1965 */
1966static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1967{
1968 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1969 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1970 {
1971 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1972 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1973 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1974 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1975 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1976 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
1977 Log4Func(("Enabled\n"));
1978 }
1979}
1980
1981
1982/**
1983 * Disables VMCS shadowing for the given VMCS info. object.
1984 *
1985 * @param pVCpu The cross context virtual CPU structure.
1986 * @param pVmcsInfo The VMCS info. object.
1987 *
1988 * @remarks No-long-jump zone!!!
1989 */
1990static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1991{
1992 /*
1993 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
1994 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
1995 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
1996 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
1997 *
1998 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
1999 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2000 */
2001 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2002 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2003 {
2004 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2005 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2006 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2007 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2008 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2009 Log4Func(("Disabled\n"));
2010 }
2011}
2012#endif
2013
2014
2015/**
2016 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2017 *
2018 * The guest FPU state is always pre-loaded hence we don't need to bother about
2019 * sharing FPU related CR0 bits between the guest and host.
2020 *
2021 * @returns VBox status code.
2022 * @param pVCpu The cross context virtual CPU structure.
2023 * @param pVmxTransient The VMX-transient structure.
2024 *
2025 * @remarks No-long-jump zone!!!
2026 */
2027static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2028{
2029 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2030 {
2031 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2032 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2033
2034 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2035 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2036 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2037 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2038 else
2039 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
2040
2041 if (!pVmxTransient->fIsNestedGuest)
2042 {
2043 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2044 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2045 uint64_t const u64ShadowCr0 = u64GuestCr0;
2046 Assert(!RT_HI_U32(u64GuestCr0));
2047
2048 /*
2049 * Setup VT-x's view of the guest CR0.
2050 */
2051 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2052 if (VM_IS_VMX_NESTED_PAGING(pVM))
2053 {
2054#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2055 if (CPUMIsGuestPagingEnabled(pVCpu))
2056 {
2057 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2058 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2059 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2060 }
2061 else
2062 {
2063 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2064 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2065 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2066 }
2067
2068 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2069 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2070 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2071#endif
2072 }
2073 else
2074 {
2075 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2076 u64GuestCr0 |= X86_CR0_WP;
2077 }
2078
2079 /*
2080 * Guest FPU bits.
2081 *
2082          * Since we always pre-load the guest FPU before VM-entry, there is no need to track lazy state
2083          * using CR0.TS.
2084          *
2085          * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2086          * set on the first CPUs to support VT-x; there is no mention of it with regards to UX in the VM-entry checks.
2087 */
2088 u64GuestCr0 |= X86_CR0_NE;
2089
2090 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2091 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2092
2093 /*
2094 * Update exception intercepts.
2095 */
2096 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2097#ifndef IN_NEM_DARWIN
2098 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2099 {
2100 Assert(PDMVmmDevHeapIsEnabled(pVM));
2101 Assert(pVM->hm.s.vmx.pRealModeTSS);
2102 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2103 }
2104 else
2105#endif
2106 {
2107 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2108 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2109 if (fInterceptMF)
2110 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2111 }
2112
2113             /* Additional intercepts for debugging; define these yourself explicitly. */
2114#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2115 uXcptBitmap |= 0
2116 | RT_BIT(X86_XCPT_BP)
2117 | RT_BIT(X86_XCPT_DE)
2118 | RT_BIT(X86_XCPT_NM)
2119 | RT_BIT(X86_XCPT_TS)
2120 | RT_BIT(X86_XCPT_UD)
2121 | RT_BIT(X86_XCPT_NP)
2122 | RT_BIT(X86_XCPT_SS)
2123 | RT_BIT(X86_XCPT_GP)
2124 | RT_BIT(X86_XCPT_PF)
2125 | RT_BIT(X86_XCPT_MF)
2126 ;
2127#elif defined(HMVMX_ALWAYS_TRAP_PF)
2128 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2129#endif
2130 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2131 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2132 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2133 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2134 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2135
2136 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2137 u64GuestCr0 |= fSetCr0;
2138 u64GuestCr0 &= fZapCr0;
2139 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
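            /*
             * Illustration (hypothetical fixed-bit values): with u64Cr0Fixed0 = 0x80000021
             * (PG, NE, PE) and u64Cr0Fixed1 = 0xffffffff, a non-unrestricted guest gets PE,
             * NE and PG forced to 1 here, while an unrestricted guest only gets NE forced;
             * CD and NW are always cleared (line above) so caching stays enabled.
             */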
2140
2141 /* Commit the CR0 and related fields to the guest VMCS. */
2142 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2143 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2144 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2145 {
2146 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2147 AssertRC(rc);
2148 }
2149 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2150 {
2151 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2152 AssertRC(rc);
2153 }
2154
2155 /* Update our caches. */
2156 pVmcsInfo->u32ProcCtls = uProcCtls;
2157 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2158
2159 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2160 }
2161 else
2162 {
2163 /*
2164 * With nested-guests, we may have extended the guest/host mask here since we
2165 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2166 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2167 * originally supplied. We must copy those bits from the nested-guest CR0 into
2168 * the nested-guest CR0 read-shadow.
2169 */
2170 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2171 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2172 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2173 Assert(!RT_HI_U32(u64GuestCr0));
2174 Assert(u64GuestCr0 & X86_CR0_NE);
2175
2176 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2177 u64GuestCr0 |= fSetCr0;
2178 u64GuestCr0 &= fZapCr0;
2179 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2180
2181 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2182 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2183 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2184
2185 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2186 }
2187
2188 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2189 }
2190
2191 return VINF_SUCCESS;
2192}
2193
2194
2195/**
2196 * Exports the guest control registers (CR3, CR4) into the guest-state area
2197 * in the VMCS.
2198 *
2199 * @returns VBox strict status code.
2200 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2201 * without unrestricted guest access and the VMMDev is not presently
2202 * mapped (e.g. EFI32).
2203 *
2204 * @param pVCpu The cross context virtual CPU structure.
2205 * @param pVmxTransient The VMX-transient structure.
2206 *
2207 * @remarks No-long-jump zone!!!
2208 */
2209static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2210{
2211 int rc = VINF_SUCCESS;
2212 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2213
2214 /*
2215 * Guest CR2.
2216 * It's always loaded in the assembler code. Nothing to do here.
2217 */
2218
2219 /*
2220 * Guest CR3.
2221 */
2222 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2223 {
2224 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2225
2226 if (VM_IS_VMX_NESTED_PAGING(pVM))
2227 {
2228#ifndef IN_NEM_DARWIN
2229 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2230 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2231
2232 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2233 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2234 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2235 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2236
2237 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2238 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2239 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
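            /*
             * Example (hypothetical EPT PML4 address): HCPhysEPTP = 0x0000000012345000 becomes
             * 0x000000001234501e here, i.e. memory type 6 (WB) in bits 2:0 and a page-walk
             * length of 4 (encoded as 3) in bits 5:3, which is what the asserts below verify.
             */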
2240
2241 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2242 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2243 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2244 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2245 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2246 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2247 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2248
2249 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2250 AssertRC(rc);
2251#endif
2252
2253 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2254 uint64_t u64GuestCr3 = pCtx->cr3;
2255 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2256 || CPUMIsGuestPagingEnabledEx(pCtx))
2257 {
2258 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2259 if (CPUMIsGuestInPAEModeEx(pCtx))
2260 {
2261 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2262 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2263 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2264 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2265 }
2266
2267 /*
2268                  * With nested paging, the guest's view of its CR3 is left unblemished when the
2269                  * guest is using paging, or when we have unrestricted guest execution to handle
2270                  * the guest while it's not using paging.
2271 */
2272 }
2273#ifndef IN_NEM_DARWIN
2274 else
2275 {
2276 /*
2277 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2278 * thinks it accesses physical memory directly, we use our identity-mapped
2279 * page table to map guest-linear to guest-physical addresses. EPT takes care
2280 * of translating it to host-physical addresses.
2281 */
2282 RTGCPHYS GCPhys;
2283 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2284
2285 /* We obtain it here every time as the guest could have relocated this PCI region. */
2286 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2287 if (RT_SUCCESS(rc))
2288 { /* likely */ }
2289 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2290 {
2291 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2292 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2293 }
2294 else
2295 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2296
2297 u64GuestCr3 = GCPhys;
2298 }
2299#endif
2300
2301 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2302 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2303 AssertRC(rc);
2304 }
2305 else
2306 {
2307 Assert(!pVmxTransient->fIsNestedGuest);
2308 /* Non-nested paging case, just use the hypervisor's CR3. */
2309 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2310
2311 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2312 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2313 AssertRC(rc);
2314 }
2315
2316 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2317 }
2318
2319 /*
2320 * Guest CR4.
2321      * ASSUMES this is done every time we get in from ring-3! (XCR0)
2322 */
2323 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2324 {
2325 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2326 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2327
2328 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2329 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2330
2331 /*
2332 * With nested-guests, we may have extended the guest/host mask here (since we
2333          * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means the
2334 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2335 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2336 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2337 */
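        /*
         * Assumed example: if the nested hypervisor's own mask covers only CR4.VMXE while our
         * merged mask also covers CR4.PAE, the PAE bit of the read-shadow must come from the
         * nested-guest's CR4, whereas VMXE keeps the shadow value the nested hypervisor
         * supplied; that is the combination the CPUMGetGuestVmxMaskedCr4 call below is
         * expected to produce.
         */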
2338 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2339 uint64_t u64GuestCr4 = pCtx->cr4;
2340 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2341 ? pCtx->cr4
2342 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2343 Assert(!RT_HI_U32(u64GuestCr4));
2344
2345#ifndef IN_NEM_DARWIN
2346 /*
2347 * Setup VT-x's view of the guest CR4.
2348 *
2349 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2350 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2351 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2352 *
2353 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2354 */
2355 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2356 {
2357 Assert(pVM->hm.s.vmx.pRealModeTSS);
2358 Assert(PDMVmmDevHeapIsEnabled(pVM));
2359 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2360 }
2361#endif
2362
2363 if (VM_IS_VMX_NESTED_PAGING(pVM))
2364 {
2365 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2366 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2367 {
2368 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2369 u64GuestCr4 |= X86_CR4_PSE;
2370 /* Our identity mapping is a 32-bit page directory. */
2371 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2372 }
2373             /* else use guest CR4. */
2374 }
2375 else
2376 {
2377 Assert(!pVmxTransient->fIsNestedGuest);
2378
2379 /*
2380                  * The shadow paging mode and the guest paging mode can differ; the shadow follows the host
2381                  * paging mode, and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2382 */
2383 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2384 {
2385 case PGMMODE_REAL: /* Real-mode. */
2386 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2387 case PGMMODE_32_BIT: /* 32-bit paging. */
2388 {
2389 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2390 break;
2391 }
2392
2393 case PGMMODE_PAE: /* PAE paging. */
2394 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2395 {
2396 u64GuestCr4 |= X86_CR4_PAE;
2397 break;
2398 }
2399
2400 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2401 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2402 {
2403#ifdef VBOX_WITH_64_BITS_GUESTS
2404 /* For our assumption in vmxHCShouldSwapEferMsr. */
2405 Assert(u64GuestCr4 & X86_CR4_PAE);
2406 break;
2407#endif
2408 }
2409 default:
2410 AssertFailed();
2411 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2412 }
2413 }
2414
2415 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2416 u64GuestCr4 |= fSetCr4;
2417 u64GuestCr4 &= fZapCr4;
2418
2419 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2420 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2421 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2422
2423#ifndef IN_NEM_DARWIN
2424 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2425 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2426 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2427 {
2428 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2429 hmR0VmxUpdateStartVmFunction(pVCpu);
2430 }
2431#endif
2432
2433 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2434
2435 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2436 }
2437 return rc;
2438}
2439
2440
2441#ifdef VBOX_STRICT
2442/**
2443 * Strict function to validate segment registers.
2444 *
2445 * @param pVCpu The cross context virtual CPU structure.
2446 * @param pVmcsInfo The VMCS info. object.
2447 *
2448 * @remarks Will import guest CR0 on strict builds during validation of
2449 * segments.
2450 */
2451static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2452{
2453 /*
2454 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2455 *
2456 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2457 * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2458 * unusable bit and doesn't change the guest-context value.
2459 */
2460 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2461 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2462 vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2463 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2464 && ( !CPUMIsGuestInRealModeEx(pCtx)
2465 && !CPUMIsGuestInV86ModeEx(pCtx)))
2466 {
2467 /* Protected mode checks */
2468 /* CS */
2469 Assert(pCtx->cs.Attr.n.u1Present);
2470 Assert(!(pCtx->cs.Attr.u & 0xf00));
2471 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2472 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2473 || !(pCtx->cs.Attr.n.u1Granularity));
2474 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2475 || (pCtx->cs.Attr.n.u1Granularity));
2476 /* CS cannot be loaded with NULL in protected mode. */
2477 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2478 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2479 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2480 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2481 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2482 else
2483             AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2484 /* SS */
2485 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2486 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2487 if ( !(pCtx->cr0 & X86_CR0_PE)
2488 || pCtx->cs.Attr.n.u4Type == 3)
2489 {
2490 Assert(!pCtx->ss.Attr.n.u2Dpl);
2491 }
2492 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2493 {
2494 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2495 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2496 Assert(pCtx->ss.Attr.n.u1Present);
2497 Assert(!(pCtx->ss.Attr.u & 0xf00));
2498 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2499 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2500 || !(pCtx->ss.Attr.n.u1Granularity));
2501 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2502 || (pCtx->ss.Attr.n.u1Granularity));
2503 }
2504 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2505 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2506 {
2507 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2508 Assert(pCtx->ds.Attr.n.u1Present);
2509 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2510 Assert(!(pCtx->ds.Attr.u & 0xf00));
2511 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2512 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2513 || !(pCtx->ds.Attr.n.u1Granularity));
2514 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2515 || (pCtx->ds.Attr.n.u1Granularity));
2516 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2517 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2518 }
2519 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2520 {
2521 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2522 Assert(pCtx->es.Attr.n.u1Present);
2523 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2524 Assert(!(pCtx->es.Attr.u & 0xf00));
2525 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2526 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2527 || !(pCtx->es.Attr.n.u1Granularity));
2528 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2529 || (pCtx->es.Attr.n.u1Granularity));
2530 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2531 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2532 }
2533 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2534 {
2535 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2536 Assert(pCtx->fs.Attr.n.u1Present);
2537 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2538 Assert(!(pCtx->fs.Attr.u & 0xf00));
2539 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2540 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2541 || !(pCtx->fs.Attr.n.u1Granularity));
2542 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2543 || (pCtx->fs.Attr.n.u1Granularity));
2544 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2545 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2546 }
2547 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2548 {
2549 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2550 Assert(pCtx->gs.Attr.n.u1Present);
2551 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2552 Assert(!(pCtx->gs.Attr.u & 0xf00));
2553 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2554 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2555 || !(pCtx->gs.Attr.n.u1Granularity));
2556 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2557 || (pCtx->gs.Attr.n.u1Granularity));
2558 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2559 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2560 }
2561 /* 64-bit capable CPUs. */
2562 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2563 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2564 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2565 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2566 }
2567 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2568 || ( CPUMIsGuestInRealModeEx(pCtx)
2569 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2570 {
2571 /* Real and v86 mode checks. */
2572         /* vmxHCExportGuestSegReg() writes the modified value into the VMCS. We want what we're feeding to VT-x. */
2573 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2574#ifndef IN_NEM_DARWIN
2575 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2576 {
2577 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2578 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2579 }
2580 else
2581#endif
2582 {
2583 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2584 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2585 }
2586
2587 /* CS */
2588         AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#RX64 %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2589 Assert(pCtx->cs.u32Limit == 0xffff);
2590 AssertMsg(u32CSAttr == 0xf3, ("cs=%#x %#x ", pCtx->cs.Sel, u32CSAttr));
2591 /* SS */
2592 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2593 Assert(pCtx->ss.u32Limit == 0xffff);
2594 Assert(u32SSAttr == 0xf3);
2595 /* DS */
2596 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2597 Assert(pCtx->ds.u32Limit == 0xffff);
2598 Assert(u32DSAttr == 0xf3);
2599 /* ES */
2600 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2601 Assert(pCtx->es.u32Limit == 0xffff);
2602 Assert(u32ESAttr == 0xf3);
2603 /* FS */
2604 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2605 Assert(pCtx->fs.u32Limit == 0xffff);
2606 Assert(u32FSAttr == 0xf3);
2607 /* GS */
2608 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2609 Assert(pCtx->gs.u32Limit == 0xffff);
2610 Assert(u32GSAttr == 0xf3);
2611 /* 64-bit capable CPUs. */
2612 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2613 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2614 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2615 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2616 }
2617}
2618#endif /* VBOX_STRICT */
2619
2620
2621/**
2622 * Exports a guest segment register into the guest-state area in the VMCS.
2623 *
2624 * @returns VBox status code.
2625 * @param pVCpu The cross context virtual CPU structure.
2626 * @param pVmcsInfo The VMCS info. object.
2627 * @param iSegReg The segment register number (X86_SREG_XXX).
2628 * @param pSelReg Pointer to the segment selector.
2629 *
2630 * @remarks No-long-jump zone!!!
2631 */
2632static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2633{
2634 Assert(iSegReg < X86_SREG_COUNT);
2635
2636 uint32_t u32Access = pSelReg->Attr.u;
2637#ifndef IN_NEM_DARWIN
2638 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2639#endif
2640 {
2641 /*
2642          * The way to differentiate whether this is really a null selector or just a selector
2643          * loaded with 0 in real-mode is to use the segment attributes. A selector loaded in
2644          * real-mode with the value 0 is valid and usable in protected-mode, and we should -not-
2645          * mark it as an unusable segment. Both the recompiler & VT-x ensure that NULL selectors
2646          * loaded in protected-mode have their attribute set to 0.
2647 */
2648 if (u32Access)
2649 { }
2650 else
2651 u32Access = X86DESCATTR_UNUSABLE;
2652 }
2653#ifndef IN_NEM_DARWIN
2654 else
2655 {
2656 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2657 u32Access = 0xf3;
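        /* Note: 0xf3 decodes as present, DPL=3, S=1 (code/data), type 3 (accessed read/write
           data), which is the access-rights value VT-x expects for segments in virtual-8086
           mode (see the strict checks in vmxHCValidateSegmentRegs above). */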
2658 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2659 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2660 RT_NOREF_PV(pVCpu);
2661 }
2662#else
2663 RT_NOREF(pVmcsInfo);
2664#endif
2665
2666 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2667 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2668               ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2669
2670 /*
2671 * Commit it to the VMCS.
2672 */
2673 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2674 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2675 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2676 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2677 return VINF_SUCCESS;
2678}
2679
2680
2681/**
2682 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2683 * area in the VMCS.
2684 *
2685 * @returns VBox status code.
2686 * @param pVCpu The cross context virtual CPU structure.
2687 * @param pVmxTransient The VMX-transient structure.
2688 *
2689 * @remarks Will import guest CR0 on strict builds during validation of
2690 * segments.
2691 * @remarks No-long-jump zone!!!
2692 */
2693static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2694{
2695 int rc = VERR_INTERNAL_ERROR_5;
2696#ifndef IN_NEM_DARWIN
2697 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2698#endif
2699 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2700 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2701#ifndef IN_NEM_DARWIN
2702 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2703#endif
2704
2705 /*
2706 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2707 */
2708 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2709 {
2710 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2711 {
2712 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2713#ifndef IN_NEM_DARWIN
2714 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2715 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2716#endif
2717 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2718 AssertRC(rc);
2719 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2720 }
2721
2722 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2723 {
2724 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2725#ifndef IN_NEM_DARWIN
2726 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2727 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2728#endif
2729 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2730 AssertRC(rc);
2731 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2732 }
2733
2734 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2735 {
2736 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2737#ifndef IN_NEM_DARWIN
2738 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2739 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2740#endif
2741 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2742 AssertRC(rc);
2743 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2744 }
2745
2746 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2747 {
2748 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2749#ifndef IN_NEM_DARWIN
2750 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2751 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2752#endif
2753 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2754 AssertRC(rc);
2755 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2756 }
2757
2758 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2759 {
2760 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2761#ifndef IN_NEM_DARWIN
2762 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2763 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2764#endif
2765 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2766 AssertRC(rc);
2767 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2768 }
2769
2770 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2771 {
2772 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2773#ifndef IN_NEM_DARWIN
2774 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2775 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2776#endif
2777 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2778 AssertRC(rc);
2779 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2780 }
2781
2782#ifdef VBOX_STRICT
2783 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2784#endif
2785 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2786 pCtx->cs.Attr.u));
2787 }
2788
2789 /*
2790 * Guest TR.
2791 */
2792 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2793 {
2794 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2795
2796 /*
2797 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2798 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2799 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2800 */
2801 uint16_t u16Sel;
2802 uint32_t u32Limit;
2803 uint64_t u64Base;
2804 uint32_t u32AccessRights;
2805#ifndef IN_NEM_DARWIN
2806 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2807#endif
2808 {
2809 u16Sel = pCtx->tr.Sel;
2810 u32Limit = pCtx->tr.u32Limit;
2811 u64Base = pCtx->tr.u64Base;
2812 u32AccessRights = pCtx->tr.Attr.u;
2813 }
2814#ifndef IN_NEM_DARWIN
2815 else
2816 {
2817 Assert(!pVmxTransient->fIsNestedGuest);
2818 Assert(pVM->hm.s.vmx.pRealModeTSS);
2819 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2820
2821 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2822 RTGCPHYS GCPhys;
2823 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2824 AssertRCReturn(rc, rc);
2825
2826 X86DESCATTR DescAttr;
2827 DescAttr.u = 0;
2828 DescAttr.n.u1Present = 1;
2829 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2830
2831 u16Sel = 0;
2832 u32Limit = HM_VTX_TSS_SIZE;
2833 u64Base = GCPhys;
2834 u32AccessRights = DescAttr.u;
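            /* With the attributes above this yields u32AccessRights = 0x8b, i.e. a present,
               busy 32-bit TSS, matching the validation asserts further down. */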
2835 }
2836#endif
2837
2838 /* Validate. */
2839 Assert(!(u16Sel & RT_BIT(2)));
2840 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2841 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2842 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2843 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2844 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2845 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2846 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2847 Assert( (u32Limit & 0xfff) == 0xfff
2848 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2849 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2850 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2851
2852 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2853 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2854 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2855 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2856
2857 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2858 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2859 }
2860
2861 /*
2862 * Guest GDTR.
2863 */
2864 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2865 {
2866 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2867
2868 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2869 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2870
2871 /* Validate. */
2872 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2873
2874 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2875 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2876 }
2877
2878 /*
2879 * Guest LDTR.
2880 */
2881 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2882 {
2883 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2884
2885         /* The unusable bit is specific to VT-x; if it's a null selector, mark it as an unusable segment. */
2886 uint32_t u32Access;
2887 if ( !pVmxTransient->fIsNestedGuest
2888 && !pCtx->ldtr.Attr.u)
2889 u32Access = X86DESCATTR_UNUSABLE;
2890 else
2891 u32Access = pCtx->ldtr.Attr.u;
2892
2893 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2894 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2895 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2896 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2897
2898 /* Validate. */
2899 if (!(u32Access & X86DESCATTR_UNUSABLE))
2900 {
2901 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2902 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2903 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2904 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2905 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2906 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2907 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2908 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2909 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2910 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2911 }
2912
2913 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2914 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2915 }
2916
2917 /*
2918 * Guest IDTR.
2919 */
2920 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2921 {
2922 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2923
2924 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2925 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2926
2927 /* Validate. */
2928 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2929
2930 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2931 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2932 }
2933
2934 return VINF_SUCCESS;
2935}
2936
2937
2938/**
2939 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2940 * VM-exit interruption info type.
2941 *
2942 * @returns The IEM exception flags.
2943 * @param uVector The event vector.
2944 * @param uVmxEventType The VMX event type.
2945 *
2946 * @remarks This function currently only constructs flags required for
2947 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g, error-code
2948 * and CR2 aspects of an exception are not included).
2949 */
2950static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2951{
2952 uint32_t fIemXcptFlags;
2953 switch (uVmxEventType)
2954 {
2955 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2956 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2957 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2958 break;
2959
2960 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2961 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2962 break;
2963
2964 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2965 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2966 break;
2967
2968 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2969 {
2970 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2971 if (uVector == X86_XCPT_BP)
2972 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2973 else if (uVector == X86_XCPT_OF)
2974 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
2975 else
2976 {
2977 fIemXcptFlags = 0;
2978 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
2979 }
2980 break;
2981 }
2982
2983 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
2984 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2985 break;
2986
2987 default:
2988 fIemXcptFlags = 0;
2989 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
2990 break;
2991 }
2992 return fIemXcptFlags;
2993}
2994
2995
2996/**
2997 * Sets an event as a pending event to be injected into the guest.
2998 *
2999 * @param pVCpu The cross context virtual CPU structure.
3000 * @param u32IntInfo The VM-entry interruption-information field.
3001 * @param cbInstr The VM-entry instruction length in bytes (for
3002 * software interrupts, exceptions and privileged
3003 * software exceptions).
3004 * @param u32ErrCode The VM-entry exception error code.
3005 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3006 * page-fault.
3007 */
3008DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3009 RTGCUINTPTR GCPtrFaultAddress)
3010{
3011 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3012 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3013 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3014 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3015 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3016 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3017}
3018
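/*
 * Note on the interruption-information format built by the helpers below (per
 * the Intel SDM): bits 7:0 hold the vector, bits 10:8 the event type, bit 11
 * the "deliver error code" flag and bit 31 the valid bit.  Illustrative sketch
 * of a hypothetical page-fault (#PF) call site -- uErrCode and
 * GCPtrFaultAddress are placeholder names, not taken from this file:
 *
 *     uint32_t const uIntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_PF)
 *                             | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
 *                             | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
 *                             | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
 *     vmxHCSetPendingEvent(pVCpu, uIntInfo, 0, uErrCode, GCPtrFaultAddress);
 */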
3019
3020/**
3021 * Sets an external interrupt as pending-for-injection into the VM.
3022 *
3023 * @param pVCpu The cross context virtual CPU structure.
3024 * @param u8Interrupt The external interrupt vector.
3025 */
3026DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3027{
3028 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3029 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3030 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3031 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3032 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3033}
3034
3035
3036/**
3037 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3038 *
3039 * @param pVCpu The cross context virtual CPU structure.
3040 */
3041DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3042{
3043 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3044 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3045 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3046 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3047 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3048}
3049
3050
3051/**
3052 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3053 *
3054 * @param pVCpu The cross context virtual CPU structure.
3055 */
3056DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3057{
3058 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3059 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3060 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3061 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3062 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3063}
3064
3065
3066/**
3067 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3068 *
3069 * @param pVCpu The cross context virtual CPU structure.
3070 */
3071DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3072{
3073 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3074 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3075 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3076 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3077 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3078}
3079
3080
3081/**
3082 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3083 *
3084 * @param pVCpu The cross context virtual CPU structure.
3085 */
3086DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3087{
3088 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3089 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3090 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3091 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3092 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3093}
3094
3095
3096#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3097/**
3098 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3099 *
3100 * @param pVCpu The cross context virtual CPU structure.
3101 * @param u32ErrCode The error code for the general-protection exception.
3102 */
3103DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3104{
3105 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3106 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3107 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3108 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3109 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3110}
3111
3112
3113/**
3114 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3115 *
3116 * @param pVCpu The cross context virtual CPU structure.
3117 * @param u32ErrCode The error code for the stack exception.
3118 */
3119DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3120{
3121 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3122 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3123 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3124 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3125 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3126}
3127#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3128
3129
3130/**
3131 * Fixes up attributes for the specified segment register.
3132 *
3133 * @param pVCpu The cross context virtual CPU structure.
3134 * @param pSelReg The segment register that needs fixing.
3135 * @param pszRegName The register name (for logging and assertions).
3136 */
3137static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3138{
3139 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3140
3141 /*
3142 * If VT-x marks the segment as unusable, most other bits remain undefined:
3143 * - For CS the L, D and G bits have meaning.
3144 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3145 * - For the remaining data segments no bits are defined.
3146 *
3147 * The present bit and the unusable bit have been observed to be set at the
3148 * same time (the selector was supposed to be invalid as we started executing
3149 * a V8086 interrupt in ring-0).
3150 *
3151 * What should be important for the rest of the VBox code, is that the P bit is
3152 * cleared. Some of the other VBox code recognizes the unusable bit, but
3153 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3154 * safe side here, we'll strip off P and other bits we don't care about. If
3155 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3156 *
3157 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3158 */
3159#ifdef VBOX_STRICT
3160 uint32_t const uAttr = pSelReg->Attr.u;
3161#endif
3162
3163 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3164 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3165 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3166
3167#ifdef VBOX_STRICT
3168# ifndef IN_NEM_DARWIN
3169 VMMRZCallRing3Disable(pVCpu);
3170# endif
3171 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3172# ifdef DEBUG_bird
3173 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3174 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3175 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3176# endif
3177# ifndef IN_NEM_DARWIN
3178 VMMRZCallRing3Enable(pVCpu);
3179# endif
3180 NOREF(uAttr);
3181#endif
3182 RT_NOREF2(pVCpu, pszRegName);
3183}
3184
3185
3186/**
3187 * Imports a guest segment register from the current VMCS into the guest-CPU
3188 * context.
3189 *
3190 * @param pVCpu The cross context virtual CPU structure.
3191 * @tparam a_iSegReg The segment register number (X86_SREG_XXX).
3192 *
3193 * @remarks Called with interrupts and/or preemption disabled.
3194 */
3195template<uint32_t const a_iSegReg>
3196DECLINLINE(void) vmxHCImportGuestSegReg(PVMCPUCC pVCpu)
3197{
3198 AssertCompile(a_iSegReg < X86_SREG_COUNT);
3199 /* Check that the macros we depend upon here and in the exporting parent function work: */
3200#define MY_SEG_VMCS_FIELD(a_FieldPrefix, a_FieldSuff) \
3201 ( a_iSegReg == X86_SREG_ES ? a_FieldPrefix ## ES ## a_FieldSuff \
3202 : a_iSegReg == X86_SREG_CS ? a_FieldPrefix ## CS ## a_FieldSuff \
3203 : a_iSegReg == X86_SREG_SS ? a_FieldPrefix ## SS ## a_FieldSuff \
3204 : a_iSegReg == X86_SREG_DS ? a_FieldPrefix ## DS ## a_FieldSuff \
3205 : a_iSegReg == X86_SREG_FS ? a_FieldPrefix ## FS ## a_FieldSuff \
3206 : a_iSegReg == X86_SREG_GS ? a_FieldPrefix ## GS ## a_FieldSuff : 0)
3207 AssertCompile(VMX_VMCS_GUEST_SEG_BASE(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS_GUEST_,_BASE));
3208 AssertCompile(VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS16_GUEST_,_SEL));
3209 AssertCompile(VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_LIMIT));
3210 AssertCompile(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_ACCESS_RIGHTS));
3211
3212 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[a_iSegReg];
3213
3214 uint16_t u16Sel;
3215 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg), &u16Sel); AssertRC(rc);
3216 pSelReg->Sel = u16Sel;
3217 pSelReg->ValidSel = u16Sel;
3218
3219 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3220 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(a_iSegReg), &pSelReg->u64Base); AssertRC(rc);
3221
3222 uint32_t u32Attr;
3223 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg), &u32Attr); AssertRC(rc);
3224 pSelReg->Attr.u = u32Attr;
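    /* Note: the "ES\0CS\0SS\0DS\0FS\0GS" string below is a packed name table; each
       entry is 3 bytes (two characters plus a NUL), so a_iSegReg * 3 selects the
       register name used for logging. */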
3225 if (u32Attr & X86DESCATTR_UNUSABLE)
3226 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + a_iSegReg * 3);
3227
3228 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3229}
3230
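/*
 * Illustrative instantiation (a sketch; the actual call sites appear in the
 * state-import code further down in this file):
 *
 *     vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
 */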
3231
3232/**
3233 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3234 *
3235 * @param pVCpu The cross context virtual CPU structure.
3236 *
3237 * @remarks Called with interrupts and/or preemption disabled.
3238 */
3239DECLINLINE(void) vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3240{
3241 uint16_t u16Sel;
3242 uint64_t u64Base;
3243 uint32_t u32Limit, u32Attr;
3244 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3245 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3246 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3247 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3248
3249 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3250 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3251 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3252 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3253 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3254 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3255 if (u32Attr & X86DESCATTR_UNUSABLE)
3256 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3257}
3258
3259
3260/**
3261 * Imports the guest TR from the current VMCS into the guest-CPU context.
3262 *
3263 * @param pVCpu The cross context virtual CPU structure.
3264 *
3265 * @remarks Called with interrupts and/or preemption disabled.
3266 */
3267DECLINLINE(void) vmxHCImportGuestTr(PVMCPUCC pVCpu)
3268{
3269 uint16_t u16Sel;
3270 uint64_t u64Base;
3271 uint32_t u32Limit, u32Attr;
3272 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3273 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3274 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3275 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3276
3277 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3278 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3279 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3280 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3281 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3282 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3283 /* TR is the only selector that can never be unusable. */
3284 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3285}
3286
3287
3288/**
3289 * Core: Imports the guest RIP from the VMCS back into the guest-CPU context.
3290 *
3291 * @returns The RIP value.
3292 * @param pVCpu The cross context virtual CPU structure.
3293 *
3294 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3295 * @remarks Do -not- call this function directly!
3296 */
3297DECL_FORCE_INLINE(uint64_t) vmxHCImportGuestCoreRip(PVMCPUCC pVCpu)
3298{
3299 uint64_t u64Val;
3300 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3301 AssertRC(rc);
3302
3303 pVCpu->cpum.GstCtx.rip = u64Val;
3304
3305 return u64Val;
3306}
3307
3308
3309/**
3310 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3311 *
3312 * @param pVCpu The cross context virtual CPU structure.
3313 *
3314 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3315 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3316 * instead!!!
3317 */
3318DECLINLINE(void) vmxHCImportGuestRip(PVMCPUCC pVCpu)
3319{
3320 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
3321 {
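        /* Note: fFlattened=false since only RIP is being imported here; the CS base may not be current. */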
3322 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3323 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3324 }
3325}
3326
3327
3328/**
3329 * Core: Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3330 *
3331 * @param pVCpu The cross context virtual CPU structure.
3332 * @param pVmcsInfo The VMCS info. object.
3333 *
3334 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3335 * @remarks Do -not- call this function directly!
3336 */
3337DECL_FORCE_INLINE(void) vmxHCImportGuestCoreRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3338{
3339 uint64_t fRFlags;
3340 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &fRFlags);
3341 AssertRC(rc);
3342
3343 Assert((fRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
3344 Assert((fRFlags & ~(uint64_t)(X86_EFL_1 | X86_EFL_LIVE_MASK)) == 0);
3345
3346 pVCpu->cpum.GstCtx.rflags.u = fRFlags;
3347#ifndef IN_NEM_DARWIN
3348 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3349 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
3350 { /* most likely */ }
3351 else
3352 {
3353 pVCpu->cpum.GstCtx.eflags.Bits.u1VM = 0;
3354 pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3355 }
3356#else
3357 RT_NOREF(pVmcsInfo);
3358#endif
3359}
3360
3361
3362/**
3363 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3364 *
3365 * @param pVCpu The cross context virtual CPU structure.
3366 * @param pVmcsInfo The VMCS info. object.
3367 *
3368 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3369 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3370 * instead!!!
3371 */
3372DECLINLINE(void) vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3373{
3374 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RFLAGS)
3375 {
3376 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3377 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3378 }
3379}
3380
3381
3382/**
3383 * Worker for vmxHCImportGuestIntrState that handles the case where any of the
3384 * relevant VMX_VMCS32_GUEST_INT_STATE bits are set.
3385 */
3386DECL_NO_INLINE(static,void) vmxHCImportGuestIntrStateSlow(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fGstIntState)
3387{
3388 /*
3389 * We must import RIP here to set our EM interrupt-inhibited state.
3390 * We also import RFLAGS as our code that evaluates pending interrupts
3391 * before VM-entry requires it.
3392 */
3393 vmxHCImportGuestRip(pVCpu);
3394 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3395
3396 CPUMUpdateInterruptShadowSsStiEx(&pVCpu->cpum.GstCtx,
3397 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
3398 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
3399 pVCpu->cpum.GstCtx.rip);
3400 CPUMUpdateInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx, RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
3401}
3402
3403
3404/**
3405 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3406 * context.
3407 *
3408 * @note May import RIP and RFLAGS if interrupts or NMIs are blocked.
3409 *
3410 * @param pVCpu The cross context virtual CPU structure.
3411 * @param pVmcsInfo The VMCS info. object.
3412 *
3413 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3414 * do not log!
3415 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3416 * instead!!!
3417 */
3418DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3419{
3420 uint32_t u32Val;
3421 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3422 if (!u32Val)
3423 {
3424 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
3425 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
3426 }
3427 else
3428 vmxHCImportGuestIntrStateSlow(pVCpu, pVmcsInfo, u32Val);
3429}
3430
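/*
 * For reference (per the Intel SDM): in the guest interruptibility-state field,
 * bit 0 is blocking by STI, bit 1 blocking by MOV SS, bit 2 blocking by SMI and
 * bit 3 blocking by NMI.  Only the STI, MOV SS and NMI bits are propagated into
 * the CPUM inhibit state above.
 */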
3431
3432/**
3433 * Worker for VMXR0ImportStateOnDemand.
3434 *
3435 * @returns VBox status code.
3436 * @param pVCpu The cross context virtual CPU structure.
3437 * @param pVmcsInfo The VMCS info. object.
3438 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3439 */
3440static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3441{
3442 int rc = VINF_SUCCESS;
3443 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3444 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3445 uint32_t u32Val;
3446
3447 /*
3448 * Note! This is a hack to work around a mysterious BSOD observed with release builds
3449 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3450 * neither are other host platforms.
3451 *
3452 * Committing this temporarily as it prevents BSOD.
3453 *
3454 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3455 */
3456#ifdef RT_OS_WINDOWS
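    /* Paranoia: reject an obviously bogus VM pointer (NULL or an all-ones bit pattern). */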
3457 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3458 return VERR_HM_IPE_1;
3459#endif
3460
3461 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3462
3463#ifndef IN_NEM_DARWIN
3464 /*
3465 * We disable interrupts to make the updating of the state and in particular
3466 * the fExtrn modification atomic wrt preemption hooks.
3467 */
3468 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3469#endif
3470
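    /* Only state still marked as external (i.e. not yet imported) needs fetching. */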
3471 fWhat &= pCtx->fExtrn;
3472 if (fWhat)
3473 {
3474 do
3475 {
3476 if (fWhat & CPUMCTX_EXTRN_RIP)
3477 vmxHCImportGuestRip(pVCpu);
3478
3479 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3480 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3481
3482 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3483 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3484 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3485
3486 if (fWhat & CPUMCTX_EXTRN_RSP)
3487 {
3488 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3489 AssertRC(rc);
3490 }
3491
3492 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3493 {
3494 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3495#ifndef IN_NEM_DARWIN
3496 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3497#else
3498 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3499#endif
3500 if (fWhat & CPUMCTX_EXTRN_CS)
3501 {
3502 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3503 vmxHCImportGuestRip(pVCpu); /** @todo WTF? */
3504 if (fRealOnV86Active)
3505 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3506 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3507 }
3508 if (fWhat & CPUMCTX_EXTRN_SS)
3509 {
3510 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3511 if (fRealOnV86Active)
3512 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3513 }
3514 if (fWhat & CPUMCTX_EXTRN_DS)
3515 {
3516 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3517 if (fRealOnV86Active)
3518 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3519 }
3520 if (fWhat & CPUMCTX_EXTRN_ES)
3521 {
3522 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3523 if (fRealOnV86Active)
3524 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3525 }
3526 if (fWhat & CPUMCTX_EXTRN_FS)
3527 {
3528 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3529 if (fRealOnV86Active)
3530 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3531 }
3532 if (fWhat & CPUMCTX_EXTRN_GS)
3533 {
3534 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3535 if (fRealOnV86Active)
3536 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3537 }
3538 }
3539
3540 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3541 {
3542 if (fWhat & CPUMCTX_EXTRN_LDTR)
3543 vmxHCImportGuestLdtr(pVCpu);
3544
3545 if (fWhat & CPUMCTX_EXTRN_GDTR)
3546 {
3547 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3548 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3549 pCtx->gdtr.cbGdt = u32Val;
3550 }
3551
3552 /* Guest IDTR. */
3553 if (fWhat & CPUMCTX_EXTRN_IDTR)
3554 {
3555 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3556 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3557 pCtx->idtr.cbIdt = u32Val;
3558 }
3559
3560 /* Guest TR. */
3561 if (fWhat & CPUMCTX_EXTRN_TR)
3562 {
3563#ifndef IN_NEM_DARWIN
3564 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3565 so we don't need to import that one. */
3566 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3567#endif
3568 vmxHCImportGuestTr(pVCpu);
3569 }
3570 }
3571
3572 if (fWhat & CPUMCTX_EXTRN_DR7)
3573 {
3574#ifndef IN_NEM_DARWIN
3575 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3576#endif
3577 {
3578 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3579 AssertRC(rc);
3580 }
3581 }
3582
3583 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3584 {
3585 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3586 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3587 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3588 pCtx->SysEnter.cs = u32Val;
3589 }
3590
3591#ifndef IN_NEM_DARWIN
3592 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3593 {
3594 if ( pVM->hmr0.s.fAllow64BitGuests
3595 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3596 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3597 }
3598
3599 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3600 {
3601 if ( pVM->hmr0.s.fAllow64BitGuests
3602 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3603 {
3604 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3605 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3606 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3607 }
3608 }
3609
3610 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3611 {
3612 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3613 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3614 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3615 Assert(pMsrs);
3616 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3617 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3618 for (uint32_t i = 0; i < cMsrs; i++)
3619 {
3620 uint32_t const idMsr = pMsrs[i].u32Msr;
3621 switch (idMsr)
3622 {
3623 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3624 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3625 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3626 default:
3627 {
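                    /* Possibly one of the LBR from/to IP MSRs or the LBR TOS MSR when LBR logging is enabled. */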
3628 uint32_t idxLbrMsr;
3629 if (VM_IS_VMX_LBR(pVM))
3630 {
3631 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3632 {
3633 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3634 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3635 break;
3636 }
3637 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3638 {
3639 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3640 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3641 break;
3642 }
3643 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3644 {
3645 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3646 break;
3647 }
3648 /* Fallthru (no break) */
3649 }
3650 pCtx->fExtrn = 0;
3651 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3652 ASMSetFlags(fEFlags);
3653 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3654 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3655 }
3656 }
3657 }
3658 }
3659#endif
3660
3661 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3662 {
3663 if (fWhat & CPUMCTX_EXTRN_CR0)
3664 {
3665 uint64_t u64Cr0;
3666 uint64_t u64Shadow;
3667 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3668 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3669#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3670 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3671 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3672#else
3673 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3674 {
3675 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3676 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3677 }
3678 else
3679 {
3680 /*
3681 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3682 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3683 * re-construct CR0. See @bugref{9180#c95} for details.
3684 */
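                    /* In other words: bits the nested hypervisor intercepts come from the
                       nested-guest VMCS guest CR0, bits only we intercept come from the CR0
                       read shadow, and the remaining bits come straight from the VMCS value. */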
3685 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3686 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3687 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3688 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3689 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3690 }
3691#endif
3692#ifndef IN_NEM_DARWIN
3693 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3694#endif
3695 CPUMSetGuestCR0(pVCpu, u64Cr0);
3696#ifndef IN_NEM_DARWIN
3697 VMMRZCallRing3Enable(pVCpu);
3698#endif
3699 }
3700
3701 if (fWhat & CPUMCTX_EXTRN_CR4)
3702 {
3703 uint64_t u64Cr4;
3704 uint64_t u64Shadow;
3705 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3706 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3707#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3708 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3709 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3710#else
3711 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3712 {
3713 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3714 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3715 }
3716 else
3717 {
3718 /*
3719 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3720 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3721 * re-construct CR4. See @bugref{9180#c95} for details.
3722 */
3723 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3724 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3725 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3726 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3727 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3728 }
3729#endif
3730 pCtx->cr4 = u64Cr4;
3731 }
3732
3733 if (fWhat & CPUMCTX_EXTRN_CR3)
3734 {
3735 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3736 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3737 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3738 && CPUMIsGuestPagingEnabledEx(pCtx)))
3739 {
3740 uint64_t u64Cr3;
3741 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3742 if (pCtx->cr3 != u64Cr3)
3743 {
3744 pCtx->cr3 = u64Cr3;
3745 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3746 }
3747
3748 /*
3749 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3750 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3751 */
3752 if (CPUMIsGuestInPAEModeEx(pCtx))
3753 {
3754 X86PDPE aPaePdpes[4];
3755 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3756 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3757 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3758 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3759 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3760 {
3761 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3762 /* PGM now updates PAE PDPTEs while updating CR3. */
3763 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3764 }
3765 }
3766 }
3767 }
3768 }
3769
3770#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3771 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3772 {
3773 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3774 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3775 {
3776 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3777 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3778 if (RT_SUCCESS(rc))
3779 { /* likely */ }
3780 else
3781 break;
3782 }
3783 }
3784#endif
3785 } while (0);
3786
3787 if (RT_SUCCESS(rc))
3788 {
3789 /* Update fExtrn. */
3790 pCtx->fExtrn &= ~fWhat;
3791
3792 /* If everything has been imported, clear the HM keeper bit. */
3793 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3794 {
3795#ifndef IN_NEM_DARWIN
3796 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3797#else
3798 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3799#endif
3800 Assert(!pCtx->fExtrn);
3801 }
3802 }
3803 }
3804#ifndef IN_NEM_DARWIN
3805 else
3806 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3807
3808 /*
3809 * Restore interrupts.
3810 */
3811 ASMSetFlags(fEFlags);
3812#endif
3813
3814 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3815
3816 if (RT_SUCCESS(rc))
3817 { /* likely */ }
3818 else
3819 return rc;
3820
3821 /*
3822 * Honor any pending CR3 updates.
3823 *
3824 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3825 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3826 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3827 *
3828 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3829 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3830 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3831 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3832 *
3833 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3834 *
3835 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3836 */
3837 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3838#ifndef IN_NEM_DARWIN
3839 && VMMRZCallRing3IsEnabled(pVCpu)
3840#endif
3841 )
3842 {
3843 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3844 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3845 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3846 }
3847
3848 return VINF_SUCCESS;
3849}
3850
3851
3852/**
3853 * Internal state fetcher, inner version where we fetch all of a_fWhat.
3854 *
3855 * @returns VBox status code.
3856 * @param pVCpu The cross context virtual CPU structure.
3857 * @param pVmcsInfo The VMCS info. object.
3858 * @param fEFlags Saved EFLAGS for restoring the interrupt flag. Ignored
3859 * in NEM/darwin context.
3860 * @tparam a_fWhat What to import, zero or more bits from
3861 * HMVMX_CPUMCTX_EXTRN_ALL.
3862 */
3863template<uint64_t const a_fWhat>
3864static int vmxHCImportGuestStateInner(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3865{
3866 Assert(a_fWhat != 0); /* No AssertCompile as the assertion probably kicks in before the compiler (clang) discards it. */
3867 AssertCompile(!(a_fWhat & ~HMVMX_CPUMCTX_EXTRN_ALL));
3868 Assert( (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == a_fWhat
3869 || (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == (a_fWhat & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
3870
3871 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3872
3873 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3874
3875 /* RIP and RFLAGS may have been imported already by the post-exit code
3876 together with the CPUMCTX_EXTRN_INHIBIT_INT/NMI state, in which case
3877 their fExtrn bits are already clear and this part of the code is skipped. */
3878 if ( (a_fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3879 && pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3880 {
3881 if (a_fWhat & CPUMCTX_EXTRN_RFLAGS)
3882 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3883
3884 if (a_fWhat & CPUMCTX_EXTRN_RIP)
3885 {
3886 if (!(a_fWhat & CPUMCTX_EXTRN_CS))
3887 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3888 else
3889 vmxHCImportGuestCoreRip(pVCpu);
3890 }
3891 }
3892
3893 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3894 if (a_fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3895 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3896
3897 if (a_fWhat & (CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TR))
3898 {
3899 if (a_fWhat & CPUMCTX_EXTRN_CS)
3900 {
3901 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3902 /** @todo try get rid of this carp, it smells and is probably never ever
3903 * used: */
3904 if ( !(a_fWhat & CPUMCTX_EXTRN_RIP)
3905 && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP))
3906 {
3907 vmxHCImportGuestCoreRip(pVCpu);
3908 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3909 }
3910 EMHistoryUpdatePC(pVCpu, pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, true /* fFlattened */);
3911 }
3912 if (a_fWhat & CPUMCTX_EXTRN_SS)
3913 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3914 if (a_fWhat & CPUMCTX_EXTRN_DS)
3915 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3916 if (a_fWhat & CPUMCTX_EXTRN_ES)
3917 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3918 if (a_fWhat & CPUMCTX_EXTRN_FS)
3919 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3920 if (a_fWhat & CPUMCTX_EXTRN_GS)
3921 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3922
3923 /* Guest TR.
3924 Real-mode emulation using virtual-8086 mode has the fake TSS
3925 (pRealModeTSS) in TR; we don't need to import that one. */
3926#ifndef IN_NEM_DARWIN
3927 PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmcsInfo->pShared;
3928 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3929 if ((a_fWhat & CPUMCTX_EXTRN_TR) && !fRealOnV86Active)
3930#else
3931 if (a_fWhat & CPUMCTX_EXTRN_TR)
3932#endif
3933 vmxHCImportGuestTr(pVCpu);
3934
3935#ifndef IN_NEM_DARWIN /* NEM/Darwin: HV supports only unrestricted guest execution. */
3936 if (fRealOnV86Active)
3937 {
3938 if (a_fWhat & CPUMCTX_EXTRN_CS)
3939 pVCpu->cpum.GstCtx.cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3940 if (a_fWhat & CPUMCTX_EXTRN_SS)
3941 pVCpu->cpum.GstCtx.ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3942 if (a_fWhat & CPUMCTX_EXTRN_DS)
3943 pVCpu->cpum.GstCtx.ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3944 if (a_fWhat & CPUMCTX_EXTRN_ES)
3945 pVCpu->cpum.GstCtx.es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3946 if (a_fWhat & CPUMCTX_EXTRN_FS)
3947 pVCpu->cpum.GstCtx.fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3948 if (a_fWhat & CPUMCTX_EXTRN_GS)
3949 pVCpu->cpum.GstCtx.gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3950 }
3951#endif
3952 }
3953
3954 if (a_fWhat & CPUMCTX_EXTRN_RSP)
3955 {
3956 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pVCpu->cpum.GstCtx.rsp);
3957 AssertRC(rc);
3958 }
3959
3960 if (a_fWhat & CPUMCTX_EXTRN_LDTR)
3961 vmxHCImportGuestLdtr(pVCpu);
3962
3963 if (a_fWhat & CPUMCTX_EXTRN_GDTR)
3964 {
3965 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pVCpu->cpum.GstCtx.gdtr.pGdt); AssertRC(rc1);
3966 uint32_t u32Val;
3967 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc2);
3968 pVCpu->cpum.GstCtx.gdtr.cbGdt = (uint16_t)u32Val;
3969 }
3970
3971 /* Guest IDTR. */
3972 if (a_fWhat & CPUMCTX_EXTRN_IDTR)
3973 {
3974 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pVCpu->cpum.GstCtx.idtr.pIdt); AssertRC(rc1);
3975 uint32_t u32Val;
3976 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc2);
3977 pVCpu->cpum.GstCtx.idtr.cbIdt = (uint64_t)u32Val;
3978 }
3979
3980 if (a_fWhat & CPUMCTX_EXTRN_DR7)
3981 {
3982#ifndef IN_NEM_DARWIN
3983 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3984#endif
3985 {
3986 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pVCpu->cpum.GstCtx.dr[7]);
3987 AssertRC(rc);
3988 }
3989 }
3990
3991 if (a_fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3992 {
3993 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pVCpu->cpum.GstCtx.SysEnter.eip); AssertRC(rc1);
3994 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pVCpu->cpum.GstCtx.SysEnter.esp); AssertRC(rc2);
3995 uint32_t u32Val;
3996 int const rc3 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc3);
3997 pVCpu->cpum.GstCtx.SysEnter.cs = u32Val;
3998 }
3999
4000#ifndef IN_NEM_DARWIN
4001 if (a_fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
4002 {
4003 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4004 && pVM->hmr0.s.fAllow64BitGuests)
4005 pVCpu->cpum.GstCtx.msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
4006 }
4007
4008 if (a_fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
4009 {
4010 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4011 && pVM->hmr0.s.fAllow64BitGuests)
4012 {
4013 pVCpu->cpum.GstCtx.msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
4014 pVCpu->cpum.GstCtx.msrSTAR = ASMRdMsr(MSR_K6_STAR);
4015 pVCpu->cpum.GstCtx.msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
4016 }
4017 }
4018
4019 if (a_fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
4020 {
4021 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
4022 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
4023 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
4024 Assert(pMsrs);
4025 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
4026 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
4027 for (uint32_t i = 0; i < cMsrs; i++)
4028 {
4029 uint32_t const idMsr = pMsrs[i].u32Msr;
4030 switch (idMsr)
4031 {
4032 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
4033 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
4034 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
4035 default:
4036 {
4037 uint32_t idxLbrMsr;
4038 if (VM_IS_VMX_LBR(pVM))
4039 {
4040 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
4041 {
4042 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
4043 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
4044 break;
4045 }
4046 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
4047 {
4048 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
4049 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
4050 break;
4051 }
4052 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
4053 {
4054 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
4055 break;
4056 }
4057 }
4058 pVCpu->cpum.GstCtx.fExtrn = 0;
4059 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
4060 ASMSetFlags(fEFlags);
4061 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
4062 return VERR_HM_UNEXPECTED_LD_ST_MSR;
4063 }
4064 }
4065 }
4066 }
4067#endif
4068
4069 if (a_fWhat & CPUMCTX_EXTRN_CR0)
4070 {
4071 uint64_t u64Cr0;
4072 uint64_t u64Shadow;
4073 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc1);
4074 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc2);
4075#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
4076 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4077 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
4078#else
4079 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4080 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4081 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
4082 else
4083 {
4084 /*
4085 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
4086 * the nested-guest using hardware-assisted VMX. Accordingly we need to
4087 * re-construct CR0. See @bugref{9180#c95} for details.
4088 */
4089 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
4090 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4091 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4092 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
4093 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
4094 }
4095#endif
4096#ifndef IN_NEM_DARWIN
4097 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
4098#endif
4099 CPUMSetGuestCR0(pVCpu, u64Cr0);
4100#ifndef IN_NEM_DARWIN
4101 VMMRZCallRing3Enable(pVCpu);
4102#endif
4103 }
4104
4105 if (a_fWhat & CPUMCTX_EXTRN_CR4)
4106 {
4107 uint64_t u64Cr4;
4108 uint64_t u64Shadow;
4109 int rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc1);
4110 int rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc2);
4111#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
4112 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4113 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
4114#else
4115 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4116 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4117 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
4118 else
4119 {
4120 /*
4121 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
4122 * the nested-guest using hardware-assisted VMX. Accordingly we need to
4123 * re-construct CR4. See @bugref{9180#c95} for details.
4124 */
4125 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
4126 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4127 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4128 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
4129 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
4130 }
4131#endif
4132 pVCpu->cpum.GstCtx.cr4 = u64Cr4;
4133 }
4134
4135 if (a_fWhat & CPUMCTX_EXTRN_CR3)
4136 {
4137 /* CR0.PG bit changes are always intercepted, so it's up to date. */
4138 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
4139 || ( VM_IS_VMX_NESTED_PAGING(pVM)
4140 && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)))
4141 {
4142 uint64_t u64Cr3;
4143 int const rc0 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc0);
4144 if (pVCpu->cpum.GstCtx.cr3 != u64Cr3)
4145 {
4146 pVCpu->cpum.GstCtx.cr3 = u64Cr3;
4147 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
4148 }
4149
4150 /*
4151 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
4152 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
4153 */
4154 if (CPUMIsGuestInPAEModeEx(&pVCpu->cpum.GstCtx))
4155 {
4156 X86PDPE aPaePdpes[4];
4157 int const rc1 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc1);
4158 int const rc2 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc2);
4159 int const rc3 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc3);
4160 int const rc4 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc4);
4161 if (memcmp(&aPaePdpes[0], &pVCpu->cpum.GstCtx.aPaePdpes[0], sizeof(aPaePdpes)))
4162 {
4163 memcpy(&pVCpu->cpum.GstCtx.aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
4164 /* PGM now updates PAE PDPTEs while updating CR3. */
4165 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
4166 }
4167 }
4168 }
4169 }
4170
4171#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4172 if (a_fWhat & CPUMCTX_EXTRN_HWVIRT)
4173 {
4174 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4175 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4176 {
4177 Assert(CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx));
4178 int const rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
4179 AssertRCReturn(rc, rc);
4180 }
4181 }
4182#endif
4183
4184 /* Update fExtrn. */
4185 pVCpu->cpum.GstCtx.fExtrn &= ~a_fWhat;
4186
4187 /* If everything has been imported, clear the HM keeper bit. */
4188 if (!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
4189 {
4190#ifndef IN_NEM_DARWIN
4191 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
4192#else
4193 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
4194#endif
4195 Assert(!pVCpu->cpum.GstCtx.fExtrn);
4196 }
4197
4198 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
4199
4200 /*
4201 * Honor any pending CR3 updates.
4202 *
4203 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
4204 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
4205 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
4206 *
4207 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
4208 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
4209 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
4210 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
4211 *
4212 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
4213 *
4214 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
4215 */
4216#ifndef IN_NEM_DARWIN
4217 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4218 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu))
4219 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu) )
4220 return VINF_SUCCESS;
4221 ASMSetFlags(fEFlags);
4222#else
4223 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4224 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
4225 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) )
4226 return VINF_SUCCESS;
4227 RT_NOREF_PV(fEFlags);
4228#endif
4229
4230 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
4231 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
4232 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
4233 return VINF_SUCCESS;
4234}
4235
4236
4237/**
4238 * Internal state fetcher.
4239 *
4240 * @returns VBox status code.
4241 * @param pVCpu The cross context virtual CPU structure.
4242 * @param pVmcsInfo The VMCS info. object.
4243 * @param pszCaller For logging.
4244 * @tparam a_fWhat What needs to be imported, CPUMCTX_EXTRN_XXX.
4245 * @tparam a_fDoneLocal What's ASSUMED to have been retrieved locally
4246 * already. This is ORed together with @a a_fWhat when
4247 * calculating what needs fetching (just for safety).
4248 * @tparam a_fDonePostExit What's ASSUMED to have been retrieved by
4249 * hmR0VmxPostRunGuest()/nemR3DarwinHandleExitCommon()
4250 * already. This is ORed together with @a a_fWhat when
4251 * calculating what needs fetching (just for safety).
4252 */
4253template<uint64_t const a_fWhat,
4254 uint64_t const a_fDoneLocal = 0,
4255 uint64_t const a_fDonePostExit = 0
4256#ifndef IN_NEM_DARWIN
4257 | CPUMCTX_EXTRN_INHIBIT_INT
4258 | CPUMCTX_EXTRN_INHIBIT_NMI
4259# if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
4260 | HMVMX_CPUMCTX_EXTRN_ALL
4261# elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
4262 | CPUMCTX_EXTRN_RFLAGS
4263# endif
4264#else /* IN_NEM_DARWIN */
4265 | CPUMCTX_EXTRN_ALL /** @todo optimize */
4266#endif /* IN_NEM_DARWIN */
4267>
4268DECLINLINE(int) vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, const char *pszCaller)
4269{
4270 RT_NOREF_PV(pszCaller);
4271 if ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL)
4272 {
4273#ifndef IN_NEM_DARWIN
4274 /*
4275 * We disable interrupts to make the updating of the state and in particular
4276 * the fExtrn modification atomic wrt preemption hooks.
4277 */
4278 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
4279#else
4280 RTCCUINTREG const fEFlags = 0;
4281#endif
4282
4283 /*
4284 * We combine all three parameters and take the (probably) inlined optimized
4285 * code path for the new things specified in a_fWhat.
4286 *
4287 * As a tweak to deal with exits that have INHIBIT_INT/NMI active, causing
4288 * vmxHCImportGuestIntrState to automatically fetch both RIP & RFLAGS, we
4289 * also take the streamlined path when both of these are cleared in fExtrn
4290 * already. vmxHCImportGuestStateInner checks fExtrn before fetching. This
4291 * helps with MWAIT and HLT exits that always inhibit IRQs on many platforms.
4292 */
4293 uint64_t const fWhatToDo = pVCpu->cpum.GstCtx.fExtrn
4294 & ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL);
4295 if (RT_LIKELY( ( fWhatToDo == (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit))
4296 || fWhatToDo == ( a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)
4297 & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)) /* fetch with INHIBIT_INT/NMI */))
4298 && (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)) != 0 /* just in case */)
4299 {
4300 int const rc = vmxHCImportGuestStateInner< a_fWhat
4301 & HMVMX_CPUMCTX_EXTRN_ALL
4302 & ~(a_fDoneLocal | a_fDonePostExit)>(pVCpu, pVmcsInfo, fEFlags);
4303#ifndef IN_NEM_DARWIN
4304 ASMSetFlags(fEFlags);
4305#endif
4306 return rc;
4307 }
4308
4309#ifndef IN_NEM_DARWIN
4310 ASMSetFlags(fEFlags);
4311#endif
4312
4313 /*
4314 * We shouldn't normally get here, but it may happen when executing
4315 * in the debug run-loops. Typically, everything should already have
4316 * been fetched then. Otherwise call the fallback state import function.
4317 */
4318 if (fWhatToDo == 0)
4319 { /* hope the cause was the debug loop or something similar */ }
4320 else
4321 {
4322 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestStateFallback);
4323 Log11Func(("a_fWhat=%#RX64/%#RX64/%#RX64 fExtrn=%#RX64 => %#RX64 - Taking inefficient code path from %s!\n",
4324 a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL, a_fDoneLocal & HMVMX_CPUMCTX_EXTRN_ALL,
4325 a_fDonePostExit & HMVMX_CPUMCTX_EXTRN_ALL, pVCpu->cpum.GstCtx.fExtrn, fWhatToDo, pszCaller));
4326 return vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, a_fWhat | a_fDoneLocal | a_fDonePostExit);
4327 }
4328 }
4329 return VINF_SUCCESS;
4330}
4331
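/*
 * Illustrative call sketch (hypothetical; the concrete template arguments used
 * by the individual VM-exit handlers vary):
 *
 *     int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS>(pVCpu, pVmcsInfo, __FUNCTION__);
 *     AssertRCReturn(rc, rc);
 */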
4332
4333/**
4334 * Check per-VM and per-VCPU force flag actions that require us to go back to
4335 * ring-3 for one reason or another.
4336 *
4337 * @returns Strict VBox status code (i.e. informational status codes too)
4338 * @retval VINF_SUCCESS if we don't have any actions that require going back to
4339 * ring-3.
4340 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
4341 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
4342 * interrupts)
4343 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
4344 * all EMTs to be in ring-3.
4345 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
4346 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
4347 * to the EM loop.
4348 *
4349 * @param pVCpu The cross context virtual CPU structure.
4350 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
4351 * @param fStepping Whether we are single-stepping the guest using the
4352 * hypervisor debugger.
4353 *
4354 * @remarks This might cause nested-guest VM-exits; the caller must check whether the
4355 * guest is still in VMX non-root mode afterwards.
4356 */
4357static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
4358{
4359#ifndef IN_NEM_DARWIN
4360 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4361#endif
4362
4363 /*
4364 * Update pending interrupts into the APIC's IRR.
4365 */
4366 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4367 APICUpdatePendingInterrupts(pVCpu);
4368
4369 /*
4370 * Anything pending? Should be more likely than not if we're doing a good job.
4371 */
4372 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4373 if ( !fStepping
4374 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
4375 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
4376 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
4377 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4378 return VINF_SUCCESS;
4379
4380 /* Pending PGM CR3 sync. */
4381 if (VMCPU_FF_IS_ANY_SET(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
4382 {
4383 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4384 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
4385 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
4386 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
4387 if (rcStrict != VINF_SUCCESS)
4388 {
4389 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
4390 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
4391 return rcStrict;
4392 }
4393 }
4394
4395 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
4396 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
4397 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4398 {
4399 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
4400 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
4401 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
4402 rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
4403 return rc;
4404 }
4405
4406 /* Pending VM request packets, such as hardware interrupts. */
4407 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
4408 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
4409 {
4410 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
4411 Log4Func(("Pending VM request forcing us back to ring-3\n"));
4412 return VINF_EM_PENDING_REQUEST;
4413 }
4414
4415 /* Pending PGM pool flushes. */
4416 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
4417 {
4418 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
4419 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
4420 return VINF_PGM_POOL_FLUSH_PENDING;
4421 }
4422
4423 /* Pending DMA requests. */
4424 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4425 {
4426 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
4427 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4428 return VINF_EM_RAW_TO_R3;
4429 }
4430
4431#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4432 /*
4433 * Pending nested-guest events.
4434 *
4435     * Please note that the priority of these events is specified and important.
4436 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4437 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
4438 */
4439 if (fIsNestedGuest)
4440 {
4441 /* Pending nested-guest APIC-write. */
4442 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4443 {
4444 Log4Func(("Pending nested-guest APIC-write\n"));
4445 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
4446 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4447 return rcStrict;
4448 }
4449
4450 /* Pending nested-guest monitor-trap flag (MTF). */
4451 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
4452 {
4453 Log4Func(("Pending nested-guest MTF\n"));
4454 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
4455 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4456 return rcStrict;
4457 }
4458
4459 /* Pending nested-guest VMX-preemption timer expired. */
4460 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
4461 {
4462 Log4Func(("Pending nested-guest preempt timer\n"));
4463 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
4464 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4465 return rcStrict;
4466 }
4467 }
4468#else
4469 NOREF(fIsNestedGuest);
4470#endif
4471
4472 return VINF_SUCCESS;
4473}
4474
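/*
 * Illustrative usage sketch (added note, not part of the original source): a pre-run
 * step in the execution loop would typically call the function above and bail out to
 * ring-3 on anything other than VINF_SUCCESS, along these lines (the surrounding loop
 * and variable names are assumptions for the example):
 *
 *     VBOXSTRICTRC rcStrict = vmxHCCheckForceFlags(pVCpu, fIsNestedGuest, fStepping);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;   // e.g. VINF_EM_RAW_TO_R3, VINF_PGM_SYNC_CR3, ...
 */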
4475
4476/**
4477 * Converts any TRPM trap into a pending HM event. This is typically used when
4478 * entering from ring-3 (not longjmp returns).
4479 *
4480 * @param pVCpu The cross context virtual CPU structure.
4481 */
4482static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
4483{
4484 Assert(TRPMHasTrap(pVCpu));
4485 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4486
4487 uint8_t uVector;
4488 TRPMEVENT enmTrpmEvent;
4489 uint32_t uErrCode;
4490 RTGCUINTPTR GCPtrFaultAddress;
4491 uint8_t cbInstr;
4492 bool fIcebp;
4493
4494 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4495 AssertRC(rc);
4496
4497 uint32_t u32IntInfo;
4498 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4499 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4500
4501 rc = TRPMResetTrap(pVCpu);
4502 AssertRC(rc);
4503 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4504 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4505
4506 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4507}
4508
4509
4510/**
4511 * Converts the pending HM event into a TRPM trap.
4512 *
4513 * @param pVCpu The cross context virtual CPU structure.
4514 */
4515static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4516{
4517 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4518
4519 /* If a trap was already pending, we did something wrong! */
4520 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4521
4522 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4523 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4524 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4525
4526 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4527
4528 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4529 AssertRC(rc);
4530
4531 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4532 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4533
4534 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4535 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4536 else
4537 {
4538 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4539 switch (uVectorType)
4540 {
4541 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4542 TRPMSetTrapDueToIcebp(pVCpu);
4543 RT_FALL_THRU();
4544 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4545 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4546 {
4547 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4548 || ( uVector == X86_XCPT_BP /* INT3 */
4549 || uVector == X86_XCPT_OF /* INTO */
4550 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4551 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4552 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4553 break;
4554 }
4555 }
4556 }
4557
4558 /* We're now done converting the pending event. */
4559 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4560}
4561
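/*
 * Illustrative sketch (added note, not part of the original source) of how the two
 * conversion helpers above pair up around guest execution:
 *
 *     if (TRPMHasTrap(pVCpu))                        // entering from ring-3 with a TRPM trap
 *         vmxHCTrpmTrapToPendingEvent(pVCpu);        //  -> convert it into a pending HM event
 *     ...
 *     if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)     // going back to ring-3 with an undelivered event
 *         vmxHCPendingEventToTrpmTrap(pVCpu);        //  -> hand it back to TRPM
 */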
4562
4563/**
4564 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4565 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4566 *
4567 * @param pVCpu The cross context virtual CPU structure.
4568 * @param pVmcsInfo The VMCS info. object.
4569 */
4570static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4571{
4572 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4573 {
4574 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4575 {
4576 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4577 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4578 AssertRC(rc);
4579 }
4580    } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4581}
4582
4583
4584/**
4585 * Clears the interrupt-window exiting control in the VMCS.
4586 *
4587 * @param pVCpu The cross context virtual CPU structure.
4588 * @param pVmcsInfo The VMCS info. object.
4589 */
4590DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4591{
4592 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4593 {
4594 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4595 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4596 AssertRC(rc);
4597 }
4598}
4599
4600
4601/**
4602 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4603 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4604 *
4605 * @param pVCpu The cross context virtual CPU structure.
4606 * @param pVmcsInfo The VMCS info. object.
4607 */
4608static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4609{
4610 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4611 {
4612 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4613 {
4614 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4615 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4616 AssertRC(rc);
4617 Log4Func(("Setup NMI-window exiting\n"));
4618 }
4619 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4620}
4621
4622
4623/**
4624 * Clears the NMI-window exiting control in the VMCS.
4625 *
4626 * @param pVCpu The cross context virtual CPU structure.
4627 * @param pVmcsInfo The VMCS info. object.
4628 */
4629DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4630{
4631 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4632 {
4633 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4634 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4635 AssertRC(rc);
4636 }
4637}
4638
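/*
 * Illustrative note (added, not part of the original source): the four window-exiting
 * helpers above are used when an interrupt or NMI is pending but cannot be delivered
 * right away (e.g. due to an interrupt shadow). Setting the control makes the CPU
 * raise a VMX_EXIT_INT_WINDOW / VMX_EXIT_NMI_WINDOW exit as soon as delivery becomes
 * possible; the corresponding exit handler then clears the control and re-evaluates
 * pending events. Roughly:
 *
 *     vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);     // interrupt pending, guest not ready
 *     ...                                              // run the guest until it can take it
 *     vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);   // in the interrupt-window exit handler
 */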
4639
4640/**
4641 * Injects an event into the guest upon VM-entry by updating the relevant fields
4642 * in the VM-entry area in the VMCS.
4643 *
4644 * @returns Strict VBox status code (i.e. informational status codes too).
4645 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4646 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4647 *
4648 * @param pVCpu The cross context virtual CPU structure.
4649 * @param pVmcsInfo The VMCS info object.
4650 * @param fIsNestedGuest Flag whether this is for a pending nested guest event.
4651 * @param pEvent The event being injected.
4652 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4653 *                      will be updated if necessary. This must not be NULL.
4654 * @param fStepping Whether we're single-stepping guest execution and should
4655 * return VINF_EM_DBG_STEPPED if the event is injected
4656 * directly (registers modified by us, not by hardware on
4657 * VM-entry).
4658 */
4659static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4660 bool fStepping, uint32_t *pfIntrState)
4661{
4662 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4663 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4664 Assert(pfIntrState);
4665
4666#ifdef IN_NEM_DARWIN
4667 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4668#endif
4669
4670 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4671 uint32_t u32IntInfo = pEvent->u64IntInfo;
4672 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4673 uint32_t const cbInstr = pEvent->cbInstr;
4674 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4675 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4676 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4677
4678#ifdef VBOX_STRICT
4679 /*
4680 * Validate the error-code-valid bit for hardware exceptions.
4681 * No error codes for exceptions in real-mode.
4682 *
4683 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4684 */
4685 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4686 && !CPUMIsGuestInRealModeEx(pCtx))
4687 {
4688 switch (uVector)
4689 {
4690 case X86_XCPT_PF:
4691 case X86_XCPT_DF:
4692 case X86_XCPT_TS:
4693 case X86_XCPT_NP:
4694 case X86_XCPT_SS:
4695 case X86_XCPT_GP:
4696 case X86_XCPT_AC:
4697 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4698 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4699 RT_FALL_THRU();
4700 default:
4701 break;
4702 }
4703 }
4704
4705 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4706 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4707 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4708#endif
4709
4710 RT_NOREF(uVector);
4711 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4712 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4713 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4714 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4715 {
4716 Assert(uVector <= X86_XCPT_LAST);
4717 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4718 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4719 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4720 }
4721 else
4722 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4723
4724 /*
4725 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4726 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4727 * interrupt handler in the (real-mode) guest.
4728 *
4729 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4730 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4731 */
4732 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4733 {
4734#ifndef IN_NEM_DARWIN
4735 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4736#endif
4737 {
4738 /*
4739 * For CPUs with unrestricted guest execution enabled and with the guest
4740 * in real-mode, we must not set the deliver-error-code bit.
4741 *
4742 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4743 */
4744 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4745 }
4746#ifndef IN_NEM_DARWIN
4747 else
4748 {
4749 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4750 Assert(PDMVmmDevHeapIsEnabled(pVM));
4751 Assert(pVM->hm.s.vmx.pRealModeTSS);
4752 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4753
4754 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4755 int rc2 = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4756 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4757 AssertRCReturn(rc2, rc2);
4758
4759 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4760 size_t const cbIdtEntry = sizeof(X86IDTR16);
4761 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4762 {
4763 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4764 if (uVector == X86_XCPT_DF)
4765 return VINF_EM_RESET;
4766
4767 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4768 No error codes for exceptions in real-mode. */
4769 if (uVector == X86_XCPT_GP)
4770 {
4771 static HMEVENT const s_EventXcptDf
4772 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4773 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4774 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4775 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4776 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
4777 }
4778
4779 /*
4780 * If we're injecting an event with no valid IDT entry, inject a #GP.
4781 * No error codes for exceptions in real-mode.
4782 *
4783 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4784 */
4785 static HMEVENT const s_EventXcptGp
4786 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4787 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4788 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4789 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4790 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
4791 }
4792
4793 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4794 uint16_t uGuestIp = pCtx->ip;
4795 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4796 {
4797 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4798                /* #BP and #OF are both benign traps; we need to resume at the next instruction. */
4799 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4800 }
4801 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4802 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4803
4804 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4805 X86IDTR16 IdtEntry;
4806 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4807 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4808 AssertRCReturn(rc2, rc2);
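            /* Note (added): a real-mode IVT entry (X86IDTR16) is 4 bytes: a 16-bit handler
               offset (offSel) followed by the 16-bit code segment selector (uSel), which are
               loaded into CS:IP below. */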
4809
4810 /* Construct the stack frame for the interrupt/exception handler. */
4811 VBOXSTRICTRC rcStrict;
4812 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, (uint16_t)pCtx->eflags.u);
4813 if (rcStrict == VINF_SUCCESS)
4814 {
4815 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4816 if (rcStrict == VINF_SUCCESS)
4817 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4818 }
4819
4820 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4821 if (rcStrict == VINF_SUCCESS)
4822 {
4823 pCtx->eflags.u &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4824 pCtx->rip = IdtEntry.offSel;
4825 pCtx->cs.Sel = IdtEntry.uSel;
4826 pCtx->cs.ValidSel = IdtEntry.uSel;
4827 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4828 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4829 && uVector == X86_XCPT_PF)
4830 pCtx->cr2 = GCPtrFault;
4831
4832 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4833 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4834 | HM_CHANGED_GUEST_RSP);
4835
4836 /*
4837 * If we delivered a hardware exception (other than an NMI) and if there was
4838 * block-by-STI in effect, we should clear it.
4839 */
4840 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4841 {
4842 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4843 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4844 Log4Func(("Clearing inhibition due to STI\n"));
4845 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4846 }
4847
4848 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4849 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4850
4851 /*
4852 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4853 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4854 */
4855 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4856
4857 /*
4858 * If we eventually support nested-guest execution without unrestricted guest execution,
4859 * we should set fInterceptEvents here.
4860 */
4861 Assert(!fIsNestedGuest);
4862
4863 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4864 if (fStepping)
4865 rcStrict = VINF_EM_DBG_STEPPED;
4866 }
4867 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4868 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4869 return rcStrict;
4870 }
4871#else
4872 RT_NOREF(pVmcsInfo);
4873#endif
4874 }
4875
4876 /*
4877 * Validate.
4878 */
4879 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4880 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4881
4882 /*
4883 * Inject the event into the VMCS.
4884 */
4885 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4886 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4887 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4888 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4889 AssertRC(rc);
4890
4891 /*
4892 * Update guest CR2 if this is a page-fault.
4893 */
4894 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4895 pCtx->cr2 = GCPtrFault;
4896
4897 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4898 return VINF_SUCCESS;
4899}
4900
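/*
 * Illustrative example (added note, not part of the original source): the 32-bit
 * interruption-information word consumed above is composed with RT_BF_MAKE, just like
 * the #DF/#GP events built inside vmxHCInjectEventVmcs. A hardware #GP with a valid
 * error code would, for instance, look like:
 *
 *     uint32_t const uIntInfoXcptGp = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_GP)
 *                                   | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
 *                                   | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
 *                                   | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
 */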
4901
4902/**
4903 * Evaluates the event to be delivered to the guest and sets it as the pending
4904 * event.
4905 *
4906 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4907 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4908 * NOT restore these force-flags.
4909 *
4910 * @returns Strict VBox status code (i.e. informational status codes too).
4911 * @param pVCpu The cross context virtual CPU structure.
4912 * @param pVmcsInfo The VMCS information structure.
4913 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
4914 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4915 */
4916static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4917{
4918 Assert(pfIntrState);
4919 Assert(!TRPMHasTrap(pVCpu));
4920
4921 /*
4922 * Compute/update guest-interruptibility state related FFs.
4923 * The FFs will be used below while evaluating events to be injected.
4924 */
4925 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4926
4927 /*
4928 * Evaluate if a new event needs to be injected.
4929     * An event that's already pending has already been subjected to all the necessary checks.
4930 */
4931 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4932 && !CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
4933 {
4934 /** @todo SMI. SMIs take priority over NMIs. */
4935
4936 /*
4937 * NMIs.
4938 * NMIs take priority over external interrupts.
4939 */
4940#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4941 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4942#endif
4943 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4944 {
4945 /*
4946 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4947 *
4948 * For a nested-guest, the FF always indicates the outer guest's ability to
4949 * receive an NMI while the guest-interruptibility state bit depends on whether
4950 * the nested-hypervisor is using virtual-NMIs.
4951 */
4952 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
4953 {
4954#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4955 if ( fIsNestedGuest
4956 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4957 return IEMExecVmxVmexitXcptNmi(pVCpu);
4958#endif
4959 vmxHCSetPendingXcptNmi(pVCpu);
4960 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4961 Log4Func(("NMI pending injection\n"));
4962
4963 /* We've injected the NMI, bail. */
4964 return VINF_SUCCESS;
4965 }
4966 if (!fIsNestedGuest)
4967 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4968 }
4969
4970 /*
4971 * External interrupts (PIC/APIC).
4972 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4973 * We cannot re-request the interrupt from the controller again.
4974         * We cannot re-request the interrupt from the controller.
4975 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4976 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4977 {
4978 Assert(!DBGFIsStepping(pVCpu));
4979 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4980 AssertRC(rc);
4981
4982 /*
4983 * We must not check EFLAGS directly when executing a nested-guest, use
4984 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
4985 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4986 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4987 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4988 *
4989 * See Intel spec. 25.4.1 "Event Blocking".
4990 */
4991 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4992 {
4993#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4994 if ( fIsNestedGuest
4995 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4996 {
4997 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4998 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4999 return rcStrict;
5000 }
5001#endif
5002 uint8_t u8Interrupt;
5003 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
5004 if (RT_SUCCESS(rc))
5005 {
5006#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5007 if ( fIsNestedGuest
5008 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
5009 {
5010 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
5011 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5012 return rcStrict;
5013 }
5014#endif
5015 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
5016 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
5017 }
5018 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
5019 {
5020 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
5021
5022 if ( !fIsNestedGuest
5023 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
5024 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
5025 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
5026
5027 /*
5028 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
5029 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
5030 * need to re-set this force-flag here.
5031 */
5032 }
5033 else
5034 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
5035
5036 /* We've injected the interrupt or taken necessary action, bail. */
5037 return VINF_SUCCESS;
5038 }
5039 if (!fIsNestedGuest)
5040 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
5041 }
5042 }
5043 else if (!fIsNestedGuest)
5044 {
5045 /*
5046 * An event is being injected or we are in an interrupt shadow. Check if another event is
5047 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
5048 * the pending event.
5049 */
5050 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
5051 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
5052 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
5053 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5054 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
5055 }
5056 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
5057
5058 return VINF_SUCCESS;
5059}
5060
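/*
 * Illustrative sketch (added note, not part of the original source): evaluation and
 * injection are two halves of the same pre-run step. A caller would typically do
 * something along these lines (exact call site and error handling are assumptions):
 *
 *     uint32_t     fIntrState = 0;
 *     VBOXSTRICTRC rcStrict   = vmxHCEvaluatePendingEvent(pVCpu, pVmcsInfo, fIsNestedGuest, &fIntrState);
 *     if (rcStrict == VINF_SUCCESS)
 *         rcStrict = vmxHCInjectPendingEvent(pVCpu, pVmcsInfo, fIsNestedGuest, fIntrState, fStepping);
 */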
5061
5062/**
5063 * Injects any pending events into the guest if the guest is in a state to
5064 * receive them.
5065 *
5066 * @returns Strict VBox status code (i.e. informational status codes too).
5067 * @param pVCpu The cross context virtual CPU structure.
5068 * @param pVmcsInfo The VMCS information structure.
5069 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
5070 * @param fIntrState The VT-x guest-interruptibility state.
5071 * @param fStepping Whether we are single-stepping the guest using the
5072 * hypervisor debugger and should return
5073 * VINF_EM_DBG_STEPPED if the event was dispatched
5074 * directly.
5075 */
5076static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
5077 uint32_t fIntrState, bool fStepping)
5078{
5079 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
5080#ifndef IN_NEM_DARWIN
5081 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5082#endif
5083
5084#ifdef VBOX_STRICT
5085 /*
5086 * Verify guest-interruptibility state.
5087 *
5088 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
5089 * since injecting an event may modify the interruptibility state and we must thus always
5090 * use fIntrState.
5091 */
5092 {
5093 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
5094 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
5095 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
5096 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5097 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
5098 Assert(!TRPMHasTrap(pVCpu));
5099 NOREF(fBlockMovSS); NOREF(fBlockSti);
5100 }
5101#endif
5102
5103 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5104 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5105 {
5106 /*
5107 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
5108 * pending even while injecting an event and in this case, we want a VM-exit as soon as
5109 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
5110 *
5111 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5112 */
5113 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
5114#ifdef VBOX_STRICT
5115 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5116 {
5117 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
5118 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5119 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5120 }
5121 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5122 {
5123 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
5124 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5125 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5126 }
5127#endif
5128 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5129 uIntType));
5130
5131 /*
5132 * Inject the event and get any changes to the guest-interruptibility state.
5133 *
5134 * The guest-interruptibility state may need to be updated if we inject the event
5135 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
5136 */
5137 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
5138 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
5139
5140 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5141 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
5142 else
5143 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
5144 }
5145
5146 /*
5147 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
5148     * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
5149 */
5150 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5151 && !fIsNestedGuest)
5152 {
5153 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
5154
5155 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5156 {
5157 /*
5158 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
5159 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
5160 */
5161 Assert(!DBGFIsStepping(pVCpu));
5162 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_TF);
5163 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
5164 fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
5165 AssertRC(rc);
5166 }
5167 else
5168 {
5169 /*
5170             * We must not deliver a debug exception when single-stepping over STI/MOV SS in the
5171             * hypervisor debugger using EFLAGS.TF; instead we clear the interrupt inhibition.
5172             * Both this case and the MTF case are taken care of in vmxHCExportSharedDebugState,
5173             * so just make sure it's called before executing guest-code.
5174 */
5175 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
5176 }
5177 }
5178    /* else: for nested-guests, this is currently handled while merging controls. */
5179
5180 /*
5181 * Finally, update the guest-interruptibility state.
5182 *
5183 * This is required for the real-on-v86 software interrupt injection, for
5184 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
5185 */
5186 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
5187 AssertRC(rc);
5188
5189 /*
5190 * There's no need to clear the VM-entry interruption-information field here if we're not
5191 * injecting anything. VT-x clears the valid bit on every VM-exit.
5192 *
5193 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
5194 */
5195
5196 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
5197 return rcStrict;
5198}
5199
5200
5201/**
5202 * Tries to determine what part of the guest-state VT-x has deemed as invalid
5203 * and update error record fields accordingly.
5204 *
5205 * @returns VMX_IGS_* error codes.
5206 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
5207 * wrong with the guest state.
5208 *
5209 * @param pVCpu The cross context virtual CPU structure.
5210 * @param pVmcsInfo The VMCS info. object.
5211 *
5212 * @remarks This function assumes our cache of the VMCS controls
5213 *          is valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
5214 */
5215static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
5216{
5217#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
5218#define HMVMX_CHECK_BREAK(expr, err) do { \
5219 if (!(expr)) { uError = (err); break; } \
5220 } while (0)
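/*
 * Added note: every guest-state check below funnels its failure through these macros,
 * which record the corresponding VMX_IGS_* diagnostic code in uError; the final value
 * is copied into the per-VCPU error record at the end of the function. For example:
 *
 *     HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
 */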
5221
5222 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5223 uint32_t uError = VMX_IGS_ERROR;
5224 uint32_t u32IntrState = 0;
5225#ifndef IN_NEM_DARWIN
5226 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5227 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
5228#else
5229 bool const fUnrestrictedGuest = true;
5230#endif
5231 do
5232 {
5233 int rc;
5234
5235 /*
5236 * Guest-interruptibility state.
5237 *
5238         * Read this first so that, even if a check that does not actually require the
5239         * guest-interruptibility state fails early, the value we record at the end still
5240         * reflects the correct VMCS contents and avoids causing further confusion.
5241 */
5242 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
5243 AssertRC(rc);
5244
5245 uint32_t u32Val;
5246 uint64_t u64Val;
5247
5248 /*
5249 * CR0.
5250 */
5251 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5252 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
5253 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
5254 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
5255 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
5256 if (fUnrestrictedGuest)
5257 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
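        /* Added background note (see also the @todo above): in the fixed-bit MSR pair,
           CR0_FIXED0 holds the bits that must be 1 and CR0_FIXED1 the bits that may be 1,
           so on conforming hardware fSetCr0 (the AND) equals FIXED0 and fZapCr0 (the OR)
           equals FIXED1; the AND/OR merely guards against inconsistent MSR values.
           Typically this makes PE, NE and PG mandatory unless unrestricted guest
           execution relaxes PE/PG as done above. */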
5258
5259 uint64_t u64GuestCr0;
5260 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
5261 AssertRC(rc);
5262 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
5263 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
5264 if ( !fUnrestrictedGuest
5265 && (u64GuestCr0 & X86_CR0_PG)
5266 && !(u64GuestCr0 & X86_CR0_PE))
5267 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
5268
5269 /*
5270 * CR4.
5271 */
5272 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5273 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
5274 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
5275
5276 uint64_t u64GuestCr4;
5277 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
5278 AssertRC(rc);
5279 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
5280 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
5281
5282 /*
5283 * IA32_DEBUGCTL MSR.
5284 */
5285 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
5286 AssertRC(rc);
5287 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5288 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
5289 {
5290 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
5291 }
5292 uint64_t u64DebugCtlMsr = u64Val;
5293
5294#ifdef VBOX_STRICT
5295 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
5296 AssertRC(rc);
5297 Assert(u32Val == pVmcsInfo->u32EntryCtls);
5298#endif
5299 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5300
5301 /*
5302 * RIP and RFLAGS.
5303 */
5304 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
5305 AssertRC(rc);
5306        /* pCtx->rip can be different from the one in the VMCS (e.g. after running guest code and taking VM-exits that don't update it). */
5307 if ( !fLongModeGuest
5308 || !pCtx->cs.Attr.n.u1Long)
5309 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
5310 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
5311 * must be identical if the "IA-32e mode guest" VM-entry
5312 * control is 1 and CS.L is 1. No check applies if the
5313 * CPU supports 64 linear-address bits. */
5314
5315 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
5316 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
5317 AssertRC(rc);
5318 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
5319 VMX_IGS_RFLAGS_RESERVED);
5320 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
5321 uint32_t const u32Eflags = u64Val;
5322
5323 if ( fLongModeGuest
5324 || ( fUnrestrictedGuest
5325 && !(u64GuestCr0 & X86_CR0_PE)))
5326 {
5327 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
5328 }
5329
5330 uint32_t u32EntryInfo;
5331 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
5332 AssertRC(rc);
5333 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5334 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
5335
5336 /*
5337 * 64-bit checks.
5338 */
5339 if (fLongModeGuest)
5340 {
5341 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
5342 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
5343 }
5344
5345 if ( !fLongModeGuest
5346 && (u64GuestCr4 & X86_CR4_PCIDE))
5347 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
5348
5349 /** @todo CR3 field must be such that bits 63:52 and bits in the range
5350 * 51:32 beyond the processor's physical-address width are 0. */
5351
5352 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5353 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
5354 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
5355
5356#ifndef IN_NEM_DARWIN
5357 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
5358 AssertRC(rc);
5359 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
5360
5361 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
5362 AssertRC(rc);
5363 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
5364#endif
5365
5366 /*
5367 * PERF_GLOBAL MSR.
5368 */
5369 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
5370 {
5371 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
5372 AssertRC(rc);
5373 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
5374 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
5375 }
5376
5377 /*
5378 * PAT MSR.
5379 */
5380 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5381 {
5382 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
5383 AssertRC(rc);
5384 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
5385 for (unsigned i = 0; i < 8; i++)
5386 {
5387 uint8_t u8Val = (u64Val & 0xff);
5388 if ( u8Val != 0 /* UC */
5389 && u8Val != 1 /* WC */
5390 && u8Val != 4 /* WT */
5391 && u8Val != 5 /* WP */
5392 && u8Val != 6 /* WB */
5393 && u8Val != 7 /* UC- */)
5394 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
5395 u64Val >>= 8;
5396 }
5397 }
5398
5399 /*
5400 * EFER MSR.
5401 */
5402 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5403 {
5404 Assert(g_fHmVmxSupportsVmcsEfer);
5405 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
5406 AssertRC(rc);
5407 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
5408 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
5409 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
5410 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
5411 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
5412 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
5413 * iemVmxVmentryCheckGuestState(). */
5414 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5415 || !(u64GuestCr0 & X86_CR0_PG)
5416 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
5417 VMX_IGS_EFER_LMA_LME_MISMATCH);
5418 }
5419
5420 /*
5421 * Segment registers.
5422 */
5423 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5424 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
5425 if (!(u32Eflags & X86_EFL_VM))
5426 {
5427 /* CS */
5428 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
5429 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
5430 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
5431 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5432 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5433 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
5434 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5435 /* CS cannot be loaded with NULL in protected mode. */
5436 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
5437 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
5438 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5439 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
5440 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5441 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
5442 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
5443 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
5444 else
5445 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
5446
5447 /* SS */
5448 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5449 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
5450 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
5451 if ( !(pCtx->cr0 & X86_CR0_PE)
5452 || pCtx->cs.Attr.n.u4Type == 3)
5453 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
5454
5455 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5456 {
5457 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
5458 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
5459 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
5460 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
5461 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5462 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5463 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
5464 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5465 }
5466
5467 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
5468 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5469 {
5470 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
5471 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
5472 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5473 || pCtx->ds.Attr.n.u4Type > 11
5474 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5475 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
5476 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
5477 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5478 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5479 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
5480 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5481 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5482 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
5483 }
5484 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5485 {
5486 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
5487 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
5488 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5489 || pCtx->es.Attr.n.u4Type > 11
5490 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5491 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5492 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5493 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5494 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5495 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5496 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5497 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5498 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5499 }
5500 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5501 {
5502 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5503 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5504 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5505 || pCtx->fs.Attr.n.u4Type > 11
5506 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5507 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5508 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5509 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5510 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5511 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5512 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5513 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5514 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5515 }
5516 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5517 {
5518 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5519 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5520 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5521 || pCtx->gs.Attr.n.u4Type > 11
5522 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5523 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5524 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5525 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5526 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5527 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5528 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5529 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5530 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5531 }
5532 /* 64-bit capable CPUs. */
5533 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5534 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5535 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5536 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5537 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5538 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5539 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5540 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5541 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5542 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5543 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5544 }
5545 else
5546 {
5547 /* V86 mode checks. */
5548 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5549 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5550 {
5551 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5552 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5553 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5554 }
5555 else
5556 {
5557 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5558 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5559 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5560 }
5561
5562 /* CS */
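            /* Added note: attribute value 0xf3 decodes to an accessed read/write data
               segment (type 3) with S=1, DPL=3 and P=1, which is exactly what the
               virtual-8086 checks below require. */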
5563 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5564 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5565 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5566 /* SS */
5567 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5568 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5569 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5570 /* DS */
5571 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5572 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5573 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5574 /* ES */
5575 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5576 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5577 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5578 /* FS */
5579 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5580 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5581 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5582 /* GS */
5583 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5584 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5585 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5586 /* 64-bit capable CPUs. */
5587 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5588 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5589 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5590 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5591 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5592 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5593 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5594 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5595 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5596 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5597 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5598 }
5599
5600 /*
5601 * TR.
5602 */
5603 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5604 /* 64-bit capable CPUs. */
5605 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5606 if (fLongModeGuest)
5607 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5608 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5609 else
5610 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5611 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5612 VMX_IGS_TR_ATTR_TYPE_INVALID);
5613 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5614 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5615 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5616 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5617 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5618 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5619 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5620 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5621
5622 /*
5623 * GDTR and IDTR (64-bit capable checks).
5624 */
5625 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5626 AssertRC(rc);
5627 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5628
5629 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5630 AssertRC(rc);
5631 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5632
5633 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5634 AssertRC(rc);
5635 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5636
5637 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5638 AssertRC(rc);
5639 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5640
5641 /*
5642 * Guest Non-Register State.
5643 */
5644 /* Activity State. */
5645 uint32_t u32ActivityState;
5646 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5647 AssertRC(rc);
5648 HMVMX_CHECK_BREAK( !u32ActivityState
5649 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5650 VMX_IGS_ACTIVITY_STATE_INVALID);
5651 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5652 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5653
5654 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5655 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5656 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5657
5658 /** @todo Activity state and injecting interrupts. Left as a todo since we
5659 * currently don't use activity states but ACTIVE. */
5660
5661 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5662 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5663
5664 /* Guest interruptibility-state. */
5665 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5666 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5667 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5668 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5669 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5670 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5671 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5672 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5673 {
5674 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5675 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5676 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5677 }
5678 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5679 {
5680 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5681 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5682 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5683 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5684 }
5685 /** @todo Assumes the processor is not in SMM. */
5686 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5687 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5688 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5689 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5690 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5691 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5692 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5693 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5694
5695 /* Pending debug exceptions. */
5696 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5697 AssertRC(rc);
5698 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5699 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5700 u32Val = u64Val; /* For pending debug exceptions checks below. */
5701
5702 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5703 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5704 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5705 {
5706 if ( (u32Eflags & X86_EFL_TF)
5707 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5708 {
5709 /* Bit 14 is PendingDebug.BS. */
5710 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5711 }
5712 if ( !(u32Eflags & X86_EFL_TF)
5713 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5714 {
5715 /* Bit 14 is PendingDebug.BS. */
5716 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5717 }
5718 }
5719
5720#ifndef IN_NEM_DARWIN
5721 /* VMCS link pointer. */
5722 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5723 AssertRC(rc);
5724 if (u64Val != UINT64_C(0xffffffffffffffff))
5725 {
5726 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5727 /** @todo Bits beyond the processor's physical-address width MBZ. */
5728 /** @todo SMM checks. */
5729 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5730 Assert(pVmcsInfo->pvShadowVmcs);
5731 VMXVMCSREVID VmcsRevId;
5732 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5733 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5734 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5735 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5736 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5737 }
5738
5739 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5740 * not using nested paging? */
5741 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5742 && !fLongModeGuest
5743 && CPUMIsGuestInPAEModeEx(pCtx))
5744 {
5745 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5746 AssertRC(rc);
5747 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5748
5749 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5750 AssertRC(rc);
5751 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5752
5753 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5754 AssertRC(rc);
5755 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5756
5757 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5758 AssertRC(rc);
5759 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5760 }
5761#endif
5762
5763 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5764 if (uError == VMX_IGS_ERROR)
5765 uError = VMX_IGS_REASON_NOT_FOUND;
5766 } while (0);
5767
5768 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5769 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5770 return uError;
5771
5772#undef HMVMX_ERROR_BREAK
5773#undef HMVMX_CHECK_BREAK
5774}
5775
5776
5777#ifndef HMVMX_USE_FUNCTION_TABLE
5778/**
5779 * Handles a guest VM-exit from hardware-assisted VMX execution.
5780 *
5781 * @returns Strict VBox status code (i.e. informational status codes too).
5782 * @param pVCpu The cross context virtual CPU structure.
5783 * @param pVmxTransient The VMX-transient structure.
5784 */
5785DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5786{
5787#ifdef DEBUG_ramshankar
5788# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5789 do { \
5790 if (a_fSave != 0) \
5791 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5792 VBOXSTRICTRC rcStrict = a_CallExpr; \
5793 if (a_fSave != 0) \
5794 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5795 return rcStrict; \
5796 } while (0)
5797#else
5798# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5799#endif
5800 uint32_t const uExitReason = pVmxTransient->uExitReason;
5801 switch (uExitReason)
5802 {
5803 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5804 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5805 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5806 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5807 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5808 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5809 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5810 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5811 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5812 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5813 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5814 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5815 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5816 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5817 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5818 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5819 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5820 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5821 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5822 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5823 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5824 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5825 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5826 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5827 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5828 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5829 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5830 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5831 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5832 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5833#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5834 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5835 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5836 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5837 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5838 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5839        case VMX_EXIT_VMRESUME:            VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5840        case VMX_EXIT_VMWRITE:             VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5841 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5842 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5843 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5844#else
5845 case VMX_EXIT_VMCLEAR:
5846 case VMX_EXIT_VMLAUNCH:
5847 case VMX_EXIT_VMPTRLD:
5848 case VMX_EXIT_VMPTRST:
5849 case VMX_EXIT_VMREAD:
5850 case VMX_EXIT_VMRESUME:
5851 case VMX_EXIT_VMWRITE:
5852 case VMX_EXIT_VMXOFF:
5853 case VMX_EXIT_VMXON:
5854 case VMX_EXIT_INVVPID:
5855 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5856#endif
5857#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5858 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5859#else
5860 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5861#endif
5862
5863 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5864 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5865 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5866
5867 case VMX_EXIT_INIT_SIGNAL:
5868 case VMX_EXIT_SIPI:
5869 case VMX_EXIT_IO_SMI:
5870 case VMX_EXIT_SMI:
5871 case VMX_EXIT_ERR_MSR_LOAD:
5872 case VMX_EXIT_ERR_MACHINE_CHECK:
5873 case VMX_EXIT_PML_FULL:
5874 case VMX_EXIT_VIRTUALIZED_EOI:
5875 case VMX_EXIT_GDTR_IDTR_ACCESS:
5876 case VMX_EXIT_LDTR_TR_ACCESS:
5877 case VMX_EXIT_APIC_WRITE:
5878 case VMX_EXIT_RDRAND:
5879 case VMX_EXIT_RSM:
5880 case VMX_EXIT_VMFUNC:
5881 case VMX_EXIT_ENCLS:
5882 case VMX_EXIT_RDSEED:
5883 case VMX_EXIT_XSAVES:
5884 case VMX_EXIT_XRSTORS:
5885 case VMX_EXIT_UMWAIT:
5886 case VMX_EXIT_TPAUSE:
5887 case VMX_EXIT_LOADIWKEY:
5888 default:
5889 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5890 }
5891#undef VMEXIT_CALL_RET
5892}
5893#endif /* !HMVMX_USE_FUNCTION_TABLE */
5894
5895
5896#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5897/**
5898 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5899 *
5900 * @returns Strict VBox status code (i.e. informational status codes too).
5901 * @param pVCpu The cross context virtual CPU structure.
5902 * @param pVmxTransient The VMX-transient structure.
5903 */
5904DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5905{
5906 uint32_t const uExitReason = pVmxTransient->uExitReason;
5907 switch (uExitReason)
5908 {
5909# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5910 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient);
5911 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolationNested(pVCpu, pVmxTransient);
5912# else
5913 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
5914 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
5915# endif
5916 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5917 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5918 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5919
5920 /*
5921 * We shouldn't direct host physical interrupts to the nested-guest.
5922 */
5923 case VMX_EXIT_EXT_INT:
5924 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5925
5926 /*
5927         * Instructions that cause VM-exits unconditionally or whose exit condition is
5928         * determined solely by the nested hypervisor (meaning if the VM-exit
5929 * happens, it's guaranteed to be a nested-guest VM-exit).
5930 *
5931 * - Provides VM-exit instruction length ONLY.
5932 */
5933 case VMX_EXIT_CPUID: /* Unconditional. */
5934 case VMX_EXIT_VMCALL:
5935 case VMX_EXIT_GETSEC:
5936 case VMX_EXIT_INVD:
5937 case VMX_EXIT_XSETBV:
5938 case VMX_EXIT_VMLAUNCH:
5939 case VMX_EXIT_VMRESUME:
5940 case VMX_EXIT_VMXOFF:
5941 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5942 case VMX_EXIT_VMFUNC:
5943 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5944
5945 /*
5946         * Instructions that cause VM-exits unconditionally or whose exit condition is
5947         * determined solely by the nested hypervisor (meaning if the VM-exit
5948 * happens, it's guaranteed to be a nested-guest VM-exit).
5949 *
5950 * - Provides VM-exit instruction length.
5951 * - Provides VM-exit information.
5952 * - Optionally provides Exit qualification.
5953 *
5954 * Since Exit qualification is 0 for all VM-exits where it is not
5955 * applicable, reading and passing it to the guest should produce
5956 * defined behavior.
5957 *
5958 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5959 */
5960 case VMX_EXIT_INVEPT: /* Unconditional. */
5961 case VMX_EXIT_INVVPID:
5962 case VMX_EXIT_VMCLEAR:
5963 case VMX_EXIT_VMPTRLD:
5964 case VMX_EXIT_VMPTRST:
5965 case VMX_EXIT_VMXON:
5966 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5967 case VMX_EXIT_LDTR_TR_ACCESS:
5968 case VMX_EXIT_RDRAND:
5969 case VMX_EXIT_RDSEED:
5970 case VMX_EXIT_XSAVES:
5971 case VMX_EXIT_XRSTORS:
5972 case VMX_EXIT_UMWAIT:
5973 case VMX_EXIT_TPAUSE:
5974 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5975
5976 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5977 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5978 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5979 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5980 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
5981 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
5982 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
5983 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
5984 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
5985 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
5986 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
5987 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
5988 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
5989 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
5990 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
5991 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
5992 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
5993 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
5994 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
5995
5996 case VMX_EXIT_PREEMPT_TIMER:
5997 {
5998 /** @todo NSTVMX: Preempt timer. */
5999 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
6000 }
6001
6002 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
6003 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
6004
6005 case VMX_EXIT_VMREAD:
6006 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
6007
6008 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
6009 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
6010
6011 case VMX_EXIT_INIT_SIGNAL:
6012 case VMX_EXIT_SIPI:
6013 case VMX_EXIT_IO_SMI:
6014 case VMX_EXIT_SMI:
6015 case VMX_EXIT_ERR_MSR_LOAD:
6016 case VMX_EXIT_ERR_MACHINE_CHECK:
6017 case VMX_EXIT_PML_FULL:
6018 case VMX_EXIT_RSM:
6019 default:
6020 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
6021 }
6022}
6023#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6024
6025
6026/** @name VM-exit helpers.
6027 * @{
6028 */
6029/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6030/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6031/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6032
6033/** Macro for VM-exits called unexpectedly. */
6034#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
6035 do { \
6036 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
6037 return VERR_VMX_UNEXPECTED_EXIT; \
6038 } while (0)
6039
6040#ifdef VBOX_STRICT
6041# ifndef IN_NEM_DARWIN
6042/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
6043# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
6044 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6045
6046# define HMVMX_ASSERT_PREEMPT_CPUID() \
6047 do { \
6048 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6049 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6050 } while (0)
6051
6052# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6053 do { \
6054 AssertPtr((a_pVCpu)); \
6055 AssertPtr((a_pVmxTransient)); \
6056 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6057 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6058 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6059 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6060 Assert((a_pVmxTransient)->pVmcsInfo); \
6061 Assert(ASMIntAreEnabled()); \
6062 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6063 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
6064 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6065 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6066 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
6067 HMVMX_ASSERT_PREEMPT_CPUID(); \
6068 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6069 } while (0)
6070# else
6071# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
6072# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
6073# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6074 do { \
6075 AssertPtr((a_pVCpu)); \
6076 AssertPtr((a_pVmxTransient)); \
6077 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6078 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6079 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6080 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6081 Assert((a_pVmxTransient)->pVmcsInfo); \
6082 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6083 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6084 } while (0)
6085# endif
6086
6087# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6088 do { \
6089 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
6090 Assert((a_pVmxTransient)->fIsNestedGuest); \
6091 } while (0)
6092
6093# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6094 do { \
6095 Log4Func(("\n")); \
6096 } while (0)
6097#else
6098# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6099 do { \
6100 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6101 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
6102 } while (0)
6103
6104# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6105 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
6106
6107# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
6108#endif
6109
6110#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6111/** Macro that does the necessary privilege checks and handles intercepted VM-exits for
6112 * guests that attempted to execute a VMX instruction. */
6113# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
6114 do \
6115 { \
6116 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
6117 if (rcStrictTmp == VINF_SUCCESS) \
6118 { /* likely */ } \
6119 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6120 { \
6121 Assert((a_pVCpu)->hm.s.Event.fPending); \
6122 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
6123 return VINF_SUCCESS; \
6124 } \
6125 else \
6126 { \
6127 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
6128 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
6129 } \
6130 } while (0)
6131
6132/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
6133# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
6134 do \
6135 { \
6136 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
6137 (a_pGCPtrEffAddr)); \
6138 if (rcStrictTmp == VINF_SUCCESS) \
6139 { /* likely */ } \
6140 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6141 { \
6142 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
6143 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
6144 NOREF(uXcptTmp); \
6145 return VINF_SUCCESS; \
6146 } \
6147 else \
6148 { \
6149 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
6150 return rcStrictTmp; \
6151 } \
6152 } while (0)
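
/*
 * Illustrative sketch (not compiled): how a VMX-instruction VM-exit handler
 * hypothetically strings the two macros above together.  The handler name, the
 * ExitInstrInfo/uExitQual transient fields and the trailing IEM hand-off are
 * assumptions made for this example, not a definitive implementation.
 */
#if 0
static VBOXSTRICTRC vmxHCExitVmxInstrSketch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    /* Raise #UD/#GP and bail out early if the privilege checks fail. */
    HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);

    /* Decode the memory operand; a failed decode leaves an exception pending and returns. */
    RTGCPTR GCPtrEffAddr;
    HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ,
                             &GCPtrEffAddr);

    /* ... hand GCPtrEffAddr to IEM for the actual instruction emulation ... */
    return VINF_SUCCESS;
}
#endif
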
6153#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6154
6155
6156/**
6157 * Advances the guest RIP by the specified number of bytes.
6158 *
6159 * @param pVCpu The cross context virtual CPU structure.
6160 * @param cbInstr Number of bytes to advance the RIP by.
6161 *
6162 * @remarks No-long-jump zone!!!
6163 */
6164DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
6165{
6166 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
6167
6168 /*
6169 * Advance RIP.
6170 *
6171 * The upper 32 bits are only set when in 64-bit mode, so we have to detect
6172 * when the addition causes a "carry" into the upper half and check whether
6173     * we're in 64-bit mode and can go on with it or whether we should zap the top
6174 * half. (Note! The 8086, 80186 and 80286 emulation is done exclusively in
6175 * IEM, so we don't need to bother with pre-386 16-bit wraparound.)
6176 *
6177 * See PC wrap around tests in bs3-cpu-weird-1.
6178 */
6179 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
6180 uint64_t const uRipNext = uRipPrev + cbInstr;
6181 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & RT_BIT_64(32))
6182 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx)))
6183 pVCpu->cpum.GstCtx.rip = uRipNext;
6184 else
6185 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
6186
6187 /*
6188 * Clear RF and interrupt shadowing.
6189 */
6190 if (RT_LIKELY(!(pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF))))
6191 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_INHIBIT_SHADOW;
6192 else
6193 {
6194 if ((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF)) == X86_EFL_TF)
6195 {
6196 /** @todo \#DB - single step. */
6197 }
6198 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
6199 }
6200 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
6201
6202 /* Mark both RIP and RFLAGS as updated. */
6203 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6204}
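
/*
 * Illustrative sketch (not compiled): the RIP wrap-around rule implemented by
 * vmxHCAdvanceGuestRipBy above, reduced to a standalone helper.  The helper
 * name AdvanceRipSketch and the fIs64BitCode parameter are assumptions made
 * for this example; the real code queries CPUMIsGuestIn64BitCodeEx() instead.
 */
#if 0
static uint64_t AdvanceRipSketch(uint64_t uRipPrev, uint32_t cbInstr, bool fIs64BitCode)
{
    uint64_t const uRipNext = uRipPrev + cbInstr;
    /* A carry into the upper half flips bit 32; outside 64-bit code the RIP wraps at 4G. */
    if (   !((uRipNext ^ uRipPrev) & RT_BIT_64(32))
        || fIs64BitCode)
        return uRipNext;
    return (uint32_t)uRipNext; /* Zap the top half. */
}
#endif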
6205
6206
6207/**
6208 * Advances the guest RIP after reading it from the VMCS.
6209 *
6210 * @returns VBox status code, no informational status codes.
6211 * @param pVCpu The cross context virtual CPU structure.
6212 * @param pVmxTransient The VMX-transient structure.
6213 *
6214 * @remarks No-long-jump zone!!!
6215 */
6216static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6217{
6218 vmxHCReadToTransientSlow<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
6219 /** @todo consider template here after checking callers. */
6220 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
6221 AssertRCReturn(rc, rc);
6222
6223 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
6224 return VINF_SUCCESS;
6225}
6226
6227
6228/**
6229 * Handle a condition that occurred while delivering an event through the guest or
6230 * nested-guest IDT.
6231 *
6232 * @returns Strict VBox status code (i.e. informational status codes too).
6233 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6234 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
6235 *          to continue execution of the guest which will deliver the \#DF.
6236 * @retval VINF_EM_RESET if we detected a triple-fault condition.
6237 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
6238 *
6239 * @param pVCpu The cross context virtual CPU structure.
6240 * @param pVmxTransient The VMX-transient structure.
6241 *
6242 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6243 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
6244 * is due to an EPT violation, PML full or SPP-related event.
6245 *
6246 * @remarks No-long-jump zone!!!
6247 */
6248static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6249{
6250 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6251 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6252 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6253 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6254 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6255 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
6256
6257 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
6258 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6259 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
6260 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
6261 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
6262 {
6263 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
6264 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
6265
6266 /*
6267 * If the event was a software interrupt (generated with INT n) or a software exception
6268 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
6269 * can handle the VM-exit and continue guest execution which will re-execute the
6270         * can handle the VM-exit and continue guest execution, which will re-execute the
6271 * trips to ring-3 before injection and involve TRPM which currently has no way of
6272 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
6273 * the problem).
6274 */
6275 IEMXCPTRAISE enmRaise;
6276 IEMXCPTRAISEINFO fRaiseInfo;
6277 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6278 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6279 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6280 {
6281 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
6282 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6283 }
6284 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
6285 {
6286 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
6287 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
6288 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
6289
6290 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
6291 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
6292
6293 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
6294
6295 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
6296 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
6297 {
6298 pVmxTransient->fVectoringPF = true;
6299 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6300 }
6301 }
6302 else
6303 {
6304 /*
6305 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
6306 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
6307 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
6308 */
6309 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6310 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6311 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
6312 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6313 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6314 }
6315
6316 /*
6317 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
6318 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
6319 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
6320 * subsequent VM-entry would fail, see @bugref{7445}.
6321 *
6322 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
6323 */
6324 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6325 && enmRaise == IEMXCPTRAISE_PREV_EVENT
6326 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6327 && CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6328 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6329
6330 switch (enmRaise)
6331 {
6332 case IEMXCPTRAISE_CURRENT_XCPT:
6333 {
6334 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
6335 Assert(rcStrict == VINF_SUCCESS);
6336 break;
6337 }
6338
6339 case IEMXCPTRAISE_PREV_EVENT:
6340 {
6341 uint32_t u32ErrCode;
6342 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
6343 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
6344 else
6345 u32ErrCode = 0;
6346
6347 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
6348 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
6349 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
6350 pVCpu->cpum.GstCtx.cr2);
6351
6352 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6353 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
6354 Assert(rcStrict == VINF_SUCCESS);
6355 break;
6356 }
6357
6358 case IEMXCPTRAISE_REEXEC_INSTR:
6359 Assert(rcStrict == VINF_SUCCESS);
6360 break;
6361
6362 case IEMXCPTRAISE_DOUBLE_FAULT:
6363 {
6364 /*
6365 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
6366 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
6367 */
6368 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6369 {
6370 pVmxTransient->fVectoringDoublePF = true;
6371 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6372 pVCpu->cpum.GstCtx.cr2));
6373 rcStrict = VINF_SUCCESS;
6374 }
6375 else
6376 {
6377 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
6378 vmxHCSetPendingXcptDF(pVCpu);
6379 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6380 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6381 rcStrict = VINF_HM_DOUBLE_FAULT;
6382 }
6383 break;
6384 }
6385
6386 case IEMXCPTRAISE_TRIPLE_FAULT:
6387 {
6388 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
6389 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6390 rcStrict = VINF_EM_RESET;
6391 break;
6392 }
6393
6394 case IEMXCPTRAISE_CPU_HANG:
6395 {
6396 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
6397 rcStrict = VERR_EM_GUEST_CPU_HANG;
6398 break;
6399 }
6400
6401 default:
6402 {
6403 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
6404 rcStrict = VERR_VMX_IPE_2;
6405 break;
6406 }
6407 }
6408 }
6409 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6410 && !CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6411 {
6412 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
6413 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
6414 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
6415 {
6416 /*
6417             * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
6418 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6419 * that virtual NMIs remain blocked until the IRET execution is completed.
6420 *
6421 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
6422 */
6423 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6424 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6425 }
6426 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6427 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6428 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6429 {
6430 /*
6431 * Execution of IRET caused an EPT violation, page-modification log-full event or
6432 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
6433 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6434 * that virtual NMIs remain blocked until the IRET execution is completed.
6435 *
6436 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
6437 */
6438 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
6439 {
6440 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6441 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6442 }
6443 }
6444 }
6445
6446 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
6447 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
6448 return rcStrict;
6449}
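
/*
 * Illustrative sketch (not compiled): the first classification step performed
 * by vmxHCCheckExitDueToEventDelivery above.  Software-initiated events (INT n,
 * INT3/INTO, ICEBP) lead to re-executing the instruction; everything else is
 * considered for reflection as the previous event.  The helper name is an
 * assumption made for this example; the real code additionally consults
 * IEMEvaluateRecursiveXcpt() for hardware exceptions.
 */
#if 0
static bool IsIdtEventReexecutedSketch(uint32_t uIdtVectorType)
{
    return uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
        || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
        || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT;
}
#endif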
6450
6451
6452#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6453/**
6454 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
6455 * guest attempting to execute a VMX instruction.
6456 *
6457 * @returns Strict VBox status code (i.e. informational status codes too).
6458 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6459 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6460 *
6461 * @param pVCpu The cross context virtual CPU structure.
6462 * @param uExitReason The VM-exit reason.
6463 *
6464 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
6465 * @remarks No-long-jump zone!!!
6466 */
6467static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
6468{
6469 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6470 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6471
6472 /*
6473 * The physical CPU would have already checked the CPU mode/code segment.
6474 * We shall just assert here for paranoia.
6475 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
6476 */
6477 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6478 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6479 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
6480
6481 if (uExitReason == VMX_EXIT_VMXON)
6482 {
6483 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6484
6485 /*
6486 * We check CR4.VMXE because it is required to be always set while in VMX operation
6487 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
6488 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6489 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6490 */
6491 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6492 {
6493 Log4Func(("CR4.VMXE is not set -> #UD\n"));
6494 vmxHCSetPendingXcptUD(pVCpu);
6495 return VINF_HM_PENDING_XCPT;
6496 }
6497 }
6498 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
6499 {
6500 /*
6501 * The guest has not entered VMX operation but attempted to execute a VMX instruction
6502         * (other than VMXON), so we need to raise a #UD.
6503 */
6504 Log4Func(("Not in VMX root mode -> #UD\n"));
6505 vmxHCSetPendingXcptUD(pVCpu);
6506 return VINF_HM_PENDING_XCPT;
6507 }
6508
6509 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
6510 return VINF_SUCCESS;
6511}
6512
6513
6514/**
6515 * Decodes the memory operand of an instruction that caused a VM-exit.
6516 *
6517 * The Exit qualification field provides the displacement field for memory
6518 * operand instructions, if any.
6519 *
6520 * @returns Strict VBox status code (i.e. informational status codes too).
6521 * @retval VINF_SUCCESS if the operand was successfully decoded.
6522 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6523 * operand.
6524 * @param pVCpu The cross context virtual CPU structure.
6525 * @param uExitInstrInfo The VM-exit instruction information field.
6526 * @param enmMemAccess The memory operand's access type (read or write).
6527 * @param GCPtrDisp The instruction displacement field, if any. For
6528 * RIP-relative addressing pass RIP + displacement here.
6529 * @param pGCPtrMem Where to store the effective destination memory address.
6530 *
6531 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6532 *          virtual-8086 mode and hence skips those checks while verifying if the
6533 * segment is valid.
6534 */
6535static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6536 PRTGCPTR pGCPtrMem)
6537{
6538 Assert(pGCPtrMem);
6539 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6540 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6541 | CPUMCTX_EXTRN_CR0);
6542
6543 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6544 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6545 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6546
6547 VMXEXITINSTRINFO ExitInstrInfo;
6548 ExitInstrInfo.u = uExitInstrInfo;
6549 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6550 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6551 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6552 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6553 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6554 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6555 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6556 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6557 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6558
6559 /*
6560 * Validate instruction information.
6561     * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6562 */
6563 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6564 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6565 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6566 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6567 AssertLogRelMsgReturn(fIsMemOperand,
6568 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6569
6570 /*
6571 * Compute the complete effective address.
6572 *
6573 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6574 * See AMD spec. 4.5.2 "Segment Registers".
6575 */
6576 RTGCPTR GCPtrMem = GCPtrDisp;
6577 if (fBaseRegValid)
6578 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6579 if (fIdxRegValid)
6580 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6581
6582 RTGCPTR const GCPtrOff = GCPtrMem;
6583 if ( !fIsLongMode
6584 || iSegReg >= X86_SREG_FS)
6585 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6586 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6587
6588 /*
6589 * Validate effective address.
6590 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6591 */
6592 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6593 Assert(cbAccess > 0);
6594 if (fIsLongMode)
6595 {
6596 if (X86_IS_CANONICAL(GCPtrMem))
6597 {
6598 *pGCPtrMem = GCPtrMem;
6599 return VINF_SUCCESS;
6600 }
6601
6602 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6603 * "Data Limit Checks in 64-bit Mode". */
6604 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6605 vmxHCSetPendingXcptGP(pVCpu, 0);
6606 return VINF_HM_PENDING_XCPT;
6607 }
6608
6609 /*
6610 * This is a watered down version of iemMemApplySegment().
6611 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6612 * and segment CPL/DPL checks are skipped.
6613 */
6614 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6615 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6616 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6617
6618 /* Check if the segment is present and usable. */
6619 if ( pSel->Attr.n.u1Present
6620 && !pSel->Attr.n.u1Unusable)
6621 {
6622 Assert(pSel->Attr.n.u1DescType);
6623 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6624 {
6625 /* Check permissions for the data segment. */
6626 if ( enmMemAccess == VMXMEMACCESS_WRITE
6627 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6628 {
6629 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6630 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6631 return VINF_HM_PENDING_XCPT;
6632 }
6633
6634 /* Check limits if it's a normal data segment. */
6635 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6636 {
6637 if ( GCPtrFirst32 > pSel->u32Limit
6638 || GCPtrLast32 > pSel->u32Limit)
6639 {
6640 Log4Func(("Data segment limit exceeded. "
6641 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6642 GCPtrLast32, pSel->u32Limit));
6643 if (iSegReg == X86_SREG_SS)
6644 vmxHCSetPendingXcptSS(pVCpu, 0);
6645 else
6646 vmxHCSetPendingXcptGP(pVCpu, 0);
6647 return VINF_HM_PENDING_XCPT;
6648 }
6649 }
6650 else
6651 {
6652 /* Check limits if it's an expand-down data segment.
6653 Note! The upper boundary is defined by the B bit, not the G bit! */
6654 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6655 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6656 {
6657 Log4Func(("Expand-down data segment limit exceeded. "
6658 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6659 GCPtrLast32, pSel->u32Limit));
6660 if (iSegReg == X86_SREG_SS)
6661 vmxHCSetPendingXcptSS(pVCpu, 0);
6662 else
6663 vmxHCSetPendingXcptGP(pVCpu, 0);
6664 return VINF_HM_PENDING_XCPT;
6665 }
6666 }
6667 }
6668 else
6669 {
6670 /* Check permissions for the code segment. */
6671 if ( enmMemAccess == VMXMEMACCESS_WRITE
6672 || ( enmMemAccess == VMXMEMACCESS_READ
6673 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6674 {
6675 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6676 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6677 vmxHCSetPendingXcptGP(pVCpu, 0);
6678 return VINF_HM_PENDING_XCPT;
6679 }
6680
6681 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6682 if ( GCPtrFirst32 > pSel->u32Limit
6683 || GCPtrLast32 > pSel->u32Limit)
6684 {
6685 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6686 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6687 if (iSegReg == X86_SREG_SS)
6688 vmxHCSetPendingXcptSS(pVCpu, 0);
6689 else
6690 vmxHCSetPendingXcptGP(pVCpu, 0);
6691 return VINF_HM_PENDING_XCPT;
6692 }
6693 }
6694 }
6695 else
6696 {
6697 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6698 vmxHCSetPendingXcptGP(pVCpu, 0);
6699 return VINF_HM_PENDING_XCPT;
6700 }
6701
6702 *pGCPtrMem = GCPtrMem;
6703 return VINF_SUCCESS;
6704}
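
/*
 * Illustrative sketch (not compiled): the effective-address arithmetic used by
 * vmxHCDecodeMemOperand above (displacement + base + scaled index, truncated to
 * the address size), without the segment-base, limit and permission handling.
 * The helper name and parameters are assumptions made for this example.
 */
#if 0
static uint64_t ComputeEffAddrSketch(uint64_t uDisp, uint64_t uBase, uint64_t uIndex, uint8_t uScale, uint8_t uAddrSize)
{
    /* uAddrSize: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit addressing. */
    static uint64_t const s_auMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
    return (uDisp + uBase + (uIndex << uScale)) & s_auMasks[uAddrSize];
}
#endif
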
6705#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6706
6707
6708/**
6709 * VM-exit helper for LMSW.
6710 */
6711static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6712{
6713 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6714 AssertRCReturn(rc, rc);
6715
6716 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6717 AssertMsg( rcStrict == VINF_SUCCESS
6718 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6719
6720 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6721 if (rcStrict == VINF_IEM_RAISED_XCPT)
6722 {
6723 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6724 rcStrict = VINF_SUCCESS;
6725 }
6726
6727 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6728 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6729 return rcStrict;
6730}
6731
6732
6733/**
6734 * VM-exit helper for CLTS.
6735 */
6736static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6737{
6738 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6739 AssertRCReturn(rc, rc);
6740
6741 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6742 AssertMsg( rcStrict == VINF_SUCCESS
6743 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6744
6745 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6746 if (rcStrict == VINF_IEM_RAISED_XCPT)
6747 {
6748 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6749 rcStrict = VINF_SUCCESS;
6750 }
6751
6752 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6753 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6754 return rcStrict;
6755}
6756
6757
6758/**
6759 * VM-exit helper for MOV from CRx (CRx read).
6760 */
6761static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6762{
6763 Assert(iCrReg < 16);
6764 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6765
6766 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6767 AssertRCReturn(rc, rc);
6768
6769 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6770 AssertMsg( rcStrict == VINF_SUCCESS
6771 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6772
6773 if (iGReg == X86_GREG_xSP)
6774 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6775 else
6776 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6777#ifdef VBOX_WITH_STATISTICS
6778 switch (iCrReg)
6779 {
6780 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6781 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6782 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6783 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6784 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6785 }
6786#endif
6787 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6788 return rcStrict;
6789}
6790
6791
6792/**
6793 * VM-exit helper for MOV to CRx (CRx write).
6794 */
6795static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6796{
6797 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6798
6799 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6800 AssertMsg( rcStrict == VINF_SUCCESS
6801 || rcStrict == VINF_IEM_RAISED_XCPT
6802 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6803
6804 switch (iCrReg)
6805 {
6806 case 0:
6807 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6808 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6809 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6810 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6811 break;
6812
6813 case 2:
6814 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6815            /* Nothing to do here, CR2 is not part of the VMCS. */
6816 break;
6817
6818 case 3:
6819 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6820 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6821 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6822 break;
6823
6824 case 4:
6825 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6826 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6827#ifndef IN_NEM_DARWIN
6828 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6829 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6830#else
6831 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6832#endif
6833 break;
6834
6835 case 8:
6836 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6837 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6838 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6839 break;
6840
6841 default:
6842 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6843 break;
6844 }
6845
6846 if (rcStrict == VINF_IEM_RAISED_XCPT)
6847 {
6848 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6849 rcStrict = VINF_SUCCESS;
6850 }
6851 return rcStrict;
6852}
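
/*
 * Illustrative sketch (not compiled): the common tail shared by the
 * LMSW/CLTS/MOV-CRx helpers above: publish the context bits the emulation may
 * have touched and fold VINF_IEM_RAISED_XCPT back into VINF_SUCCESS.  The
 * helper name and the fChangedFlags parameter are assumptions made for this
 * example.
 */
#if 0
static VBOXSTRICTRC vmxHCFinishIemExitSketch(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict, uint64_t fChangedFlags)
{
    ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, fChangedFlags);
    if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        /* The exception is already pending; mark the exception-related context bits as changed. */
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
        rcStrict = VINF_SUCCESS;
    }
    return rcStrict;
}
#endif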
6853
6854
6855/**
6856 * VM-exit exception handler for \#PF (Page-fault exception).
6857 *
6858 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6859 */
6860static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6861{
6862 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6863 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
6864
6865#ifndef IN_NEM_DARWIN
6866 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6867 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6868 { /* likely */ }
6869 else
6870#endif
6871 {
6872#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6873 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6874#endif
6875 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6876 if (!pVmxTransient->fVectoringDoublePF)
6877 {
6878 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6879 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6880 }
6881 else
6882 {
6883 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6884 Assert(!pVmxTransient->fIsNestedGuest);
6885 vmxHCSetPendingXcptDF(pVCpu);
6886 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6887 }
6888 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6889 return VINF_SUCCESS;
6890 }
6891
6892 Assert(!pVmxTransient->fIsNestedGuest);
6893
6894    /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
6895 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6896 if (pVmxTransient->fVectoringPF)
6897 {
6898 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6899 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6900 }
6901
6902 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6903 AssertRCReturn(rc, rc);
6904
6905 Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pVCpu->cpum.GstCtx.cs.Sel,
6906 pVCpu->cpum.GstCtx.rip, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pVCpu->cpum.GstCtx.cr3));
6907
6908 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6909 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, &pVCpu->cpum.GstCtx, (RTGCPTR)pVmxTransient->uExitQual);
6910
6911 Log4Func(("#PF: rc=%Rrc\n", rc));
6912 if (rc == VINF_SUCCESS)
6913 {
6914 /*
6915         * This is typically a shadow page table sync or an MMIO instruction. But we may have
6916 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6917 */
6918 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6919 TRPMResetTrap(pVCpu);
6920 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6921 return rc;
6922 }
6923
6924 if (rc == VINF_EM_RAW_GUEST_TRAP)
6925 {
6926 if (!pVmxTransient->fVectoringDoublePF)
6927 {
6928 /* It's a guest page fault and needs to be reflected to the guest. */
6929 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6930 TRPMResetTrap(pVCpu);
6931 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6932 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6933 uGstErrorCode, pVmxTransient->uExitQual);
6934 }
6935 else
6936 {
6937 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6938 TRPMResetTrap(pVCpu);
6939 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6940 vmxHCSetPendingXcptDF(pVCpu);
6941 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6942 }
6943
6944 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6945 return VINF_SUCCESS;
6946 }
6947
6948 TRPMResetTrap(pVCpu);
6949 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6950 return rc;
6951}
6952
6953
6954/**
6955 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6956 *
6957 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6958 */
6959static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6960{
6961 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6962 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6963
6964 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR0>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6965 AssertRCReturn(rc, rc);
6966
6967 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6968 {
6969 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
6970 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6971
6972 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6973         *        provides VM-exit instruction length. If this causes problems later,
6974 * disassemble the instruction like it's done on AMD-V. */
6975 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6976 AssertRCReturn(rc2, rc2);
6977 return rc;
6978 }
6979
6980 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6981 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6982 return VINF_SUCCESS;
6983}
6984
6985
6986/**
6987 * VM-exit exception handler for \#BP (Breakpoint exception).
6988 *
6989 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6990 */
6991static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6992{
6993 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6994 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6995
6996 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6997 AssertRCReturn(rc, rc);
6998
6999 VBOXSTRICTRC rcStrict;
7000 if (!pVmxTransient->fIsNestedGuest)
7001 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
7002 else
7003 rcStrict = VINF_EM_RAW_GUEST_TRAP;
7004
7005 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
7006 {
7007 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7008 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7009 rcStrict = VINF_SUCCESS;
7010 }
7011
7012 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
7013 return rcStrict;
7014}
7015
7016
7017/**
7018 * VM-exit exception handler for \#AC (Alignment-check exception).
7019 *
7020 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7021 */
7022static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7023{
7024 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7025
7026 /*
7027     * Detect #ACs caused by the host having enabled split-lock detection.
7028 * Emulate such instructions.
7029 */
7030#define VMX_HC_EXIT_XCPT_AC_INITIAL_REGS (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS)
7031 int rc = vmxHCImportGuestState<VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7032 AssertRCReturn(rc, rc);
7033 /** @todo detect split lock in cpu feature? */
7034 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
7035 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7036 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
7037 || CPUMGetGuestCPL(pVCpu) != 3
7038           /* 3. When EFLAGS.AC is clear this can only be a split-lock case. */
7039 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
7040 {
7041 /*
7042 * Check for debug/trace events and import state accordingly.
7043 */
7044 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
7045 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7046 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
7047#ifndef IN_NEM_DARWIN
7048 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
7049#endif
7050 )
7051 {
7052 if (pVM->cCpus == 1)
7053 {
7054#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7055 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK,
7056 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7057#else
7058 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7059 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7060#endif
7061 AssertRCReturn(rc, rc);
7062 }
7063 }
7064 else
7065 {
7066 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7067 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7068 AssertRCReturn(rc, rc);
7069
7070 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
7071
7072 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
7073 {
7074 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
7075 if (rcStrict != VINF_SUCCESS)
7076 return rcStrict;
7077 }
7078 }
7079
7080 /*
7081 * Emulate the instruction.
7082 *
7083 * We have to ignore the LOCK prefix here as we must not retrigger the
7084 * detection on the host. This isn't all that satisfactory, though...
7085 */
7086 if (pVM->cCpus == 1)
7087 {
7088 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
7089 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7090
7091 /** @todo For SMP configs we should do a rendezvous here. */
7092 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
7093 if (rcStrict == VINF_SUCCESS)
7094#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7095 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
7096 HM_CHANGED_GUEST_RIP
7097 | HM_CHANGED_GUEST_RFLAGS
7098 | HM_CHANGED_GUEST_GPRS_MASK
7099 | HM_CHANGED_GUEST_CS
7100 | HM_CHANGED_GUEST_SS);
7101#else
7102 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7103#endif
7104 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7105 {
7106 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7107 rcStrict = VINF_SUCCESS;
7108 }
7109 return rcStrict;
7110 }
7111 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
7112 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7113 return VINF_EM_EMULATE_SPLIT_LOCK;
7114 }
7115
7116 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
7117 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7118 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
7119
7120 /* Re-inject it. We'll detect any nesting before getting here. */
7121 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7122 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7123 return VINF_SUCCESS;
7124}
7125
7126
7127/**
7128 * VM-exit exception handler for \#DB (Debug exception).
7129 *
7130 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7131 */
7132static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7133{
7134 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7135 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
7136
7137 /*
7138 * Get the DR6-like values from the Exit qualification and pass it to DBGF for processing.
7139 */
7140 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
7141
7142 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
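 /* The exit qualification mirrors the DR6 bit layout for B0-B3, BD and BS, which is
    why it can be masked directly with the DR6 bit masks; X86_DR6_INIT_VAL supplies
    the bits that architecturally read as 1. */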
7143 uint64_t const uDR6 = X86_DR6_INIT_VAL
7144 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
7145 | X86_DR6_BD | X86_DR6_BS));
7146 Log6Func(("uDR6=%#RX64 uExitQual=%#RX64\n", uDR6, pVmxTransient->uExitQual));
7147
7148 int rc;
7149 if (!pVmxTransient->fIsNestedGuest)
7150 {
7151 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx, uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7152
7153 /*
7154 * Prevents stepping twice over the same instruction when the guest is stepping using
7155 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
7156 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
7157 */
7158 if ( rc == VINF_EM_DBG_STEPPED
7159 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
7160 {
7161 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7162 rc = VINF_EM_RAW_GUEST_TRAP;
7163 }
7164 }
7165 else
7166 rc = VINF_EM_RAW_GUEST_TRAP;
7167 Log6Func(("rc=%Rrc\n", rc));
7168 if (rc == VINF_EM_RAW_GUEST_TRAP)
7169 {
7170 /*
7171 * The exception was for the guest. Update DR6, DR7.GD and
7172 * IA32_DEBUGCTL.LBR before forwarding it.
7173 * See Intel spec. 27.1 "Architectural State before a VM-Exit"
7174 * and @sdmv3{077,622,17.2.3,Debug Status Register (DR6)}.
7175 */
7176#ifndef IN_NEM_DARWIN
7177 VMMRZCallRing3Disable(pVCpu);
7178 HM_DISABLE_PREEMPT(pVCpu);
7179
7180 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
7181 pVCpu->cpum.GstCtx.dr[6] |= uDR6;
7182 if (CPUMIsGuestDebugStateActive(pVCpu))
7183 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
7184
7185 HM_RESTORE_PREEMPT();
7186 VMMRZCallRing3Enable(pVCpu);
7187#else
7188 /** @todo */
7189#endif
7190
7191 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7192 AssertRCReturn(rc, rc);
7193
7194 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
7195 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_GD;
7196
7197 /* Paranoia. */
7198 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
7199 pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_RA1_MASK;
7200
7201 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pVCpu->cpum.GstCtx.dr[7]);
7202 AssertRC(rc);
7203
7204 /*
7205 * Raise #DB in the guest.
7206 *
7207 * It is important to reflect exactly what the VM-exit gave us (preserving the
7208 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
7209 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
7210 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
7211 *
7212 * Intel re-documented ICEBP/INT1 in May 2018; it was previously only documented as part of
7213 * the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7214 */
7215 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7216 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7217 return VINF_SUCCESS;
7218 }
7219
7220 /*
7221 * Not a guest trap; must be a hypervisor-related debug event then.
7222 * Update DR6 in case someone is interested in it.
7223 */
7224 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
7225 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
7226 CPUMSetHyperDR6(pVCpu, uDR6);
7227
7228 return rc;
7229}
7230
7231
7232/**
7233 * Hacks its way around the lovely mesa driver's backdoor accesses.
7234 *
7235 * @sa hmR0SvmHandleMesaDrvGp.
7236 */
7237static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7238{
7239 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
7240 RT_NOREF(pCtx);
7241
7242 /* For now we'll just skip the instruction. */
7243 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7244}
7245
7246
7247/**
7248 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
7249 * backdoor logging w/o checking what it is running inside.
7250 *
7251 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
7252 * backdoor port and magic numbers loaded in registers.
7253 *
7254 * @returns true if it is, false if it isn't.
7255 * @sa hmR0SvmIsMesaDrvGp.
7256 */
7257DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7258{
7259 /* 0xed: IN eAX,dx */
7260 uint8_t abInstr[1];
7261 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
7262 return false;
7263
7264 /* Check that it is #GP(0). */
7265 if (pVmxTransient->uExitIntErrorCode != 0)
7266 return false;
7267
7268 /* Check magic and port. */
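 /* 0x564d5868 is ASCII 'VMXh' and port 0x5658 ('VX') is the VMware backdoor I/O port,
    which the Mesa SVGA driver (presumably) uses for its logging. */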
7269 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
7270 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
7271 if (pCtx->rax != UINT32_C(0x564d5868))
7272 return false;
7273 if (pCtx->dx != UINT32_C(0x5658))
7274 return false;
7275
7276 /* Flat ring-3 CS. */
7277 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
7278 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
7279 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
7280 if (pCtx->cs.Attr.n.u2Dpl != 3)
7281 return false;
7282 if (pCtx->cs.u64Base != 0)
7283 return false;
7284
7285 /* Check opcode. */
7286 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
7287 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
7288 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
7289 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
7290 if (RT_FAILURE(rc))
7291 return false;
7292 if (abInstr[0] != 0xed)
7293 return false;
7294
7295 return true;
7296}
7297
7298
7299/**
7300 * VM-exit exception handler for \#GP (General-protection exception).
7301 *
7302 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7303 */
7304static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7305{
7306 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7307 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
7308
7309 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7310 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7311#ifndef IN_NEM_DARWIN
7312 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
7313 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
7314 { /* likely */ }
7315 else
7316#endif
7317 {
7318#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7319# ifndef IN_NEM_DARWIN
7320 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7321# else
7322 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7323# endif
7324#endif
7325 /*
7326 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
7327 * executing a nested-guest, reflect #GP to the guest or nested-guest.
7328 */
7329 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7330 AssertRCReturn(rc, rc);
7331 Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
7332 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
7333
7334 if ( pVmxTransient->fIsNestedGuest
7335 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
7336 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
7337 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7338 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7339 else
7340 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
7341 return rc;
7342 }
7343
7344#ifndef IN_NEM_DARWIN
7345 Assert(CPUMIsGuestInRealModeEx(pCtx));
7346 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
7347 Assert(!pVmxTransient->fIsNestedGuest);
7348
7349 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7350 AssertRCReturn(rc, rc);
7351
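 /* Real-on-v86 without unrestricted guest execution: the #GP most likely stems from the
    real-mode code doing something the v86-mode setup cannot handle directly, so emulate
    one instruction with IEM and then check whether the guest has left real mode. */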
7352 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7353 if (rcStrict == VINF_SUCCESS)
7354 {
7355 if (!CPUMIsGuestInRealModeEx(pCtx))
7356 {
7357 /*
7358 * The guest is no longer in real-mode, check if we can continue executing the
7359 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
7360 */
7361 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
7362 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
7363 {
7364 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
7365 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7366 }
7367 else
7368 {
7369 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
7370 rcStrict = VINF_EM_RESCHEDULE;
7371 }
7372 }
7373 else
7374 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7375 }
7376 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7377 {
7378 rcStrict = VINF_SUCCESS;
7379 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7380 }
7381 return VBOXSTRICTRC_VAL(rcStrict);
7382#endif
7383}
7384
7385
7386/**
7387 * VM-exit exception handler for \#DE (Divide Error).
7388 *
7389 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7390 */
7391static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7392{
7393 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7394 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
7395
7396 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7397 AssertRCReturn(rc, rc);
7398
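 /* Give GCM (the guest compatibility manager) a first shot at fixing up the #DE: on
    VINF_SUCCESS the guest context has been adjusted and the instruction is restarted,
    while VERR_NOT_FOUND (or any other failure) leads to re-injecting the #DE below. */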
7399 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7400 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
7401 {
7402 uint8_t cbInstr = 0;
7403 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
7404 if (rc2 == VINF_SUCCESS)
7405 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
7406 else if (rc2 == VERR_NOT_FOUND)
7407 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
7408 else
7409 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
7410 }
7411 else
7412 rcStrict = VINF_SUCCESS; /* Do nothing. */
7413
7414 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
7415 if (RT_FAILURE(rcStrict))
7416 {
7417 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7418 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7419 rcStrict = VINF_SUCCESS;
7420 }
7421
7422 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
7423 return VBOXSTRICTRC_VAL(rcStrict);
7424}
7425
7426
7427/**
7428 * VM-exit exception handler wrapper for all other exceptions that are not handled
7429 * by a specific handler.
7430 *
7431 * This simply re-injects the exception back into the VM without any special
7432 * processing.
7433 *
7434 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7435 */
7436static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7437{
7438 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7439
7440#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7441# ifndef IN_NEM_DARWIN
7442 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7443 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
7444 ("uVector=%#x u32XcptBitmap=%#X32\n",
7445 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
7446 NOREF(pVmcsInfo);
7447# endif
7448#endif
7449
7450 /*
7451 * Re-inject the exception into the guest. This cannot be a double-fault condition which
7452 * would have been handled while checking exits due to event delivery.
7453 */
7454 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7455
7456#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7457 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7458 AssertRCReturn(rc, rc);
7459 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7460#endif
7461
7462#ifdef VBOX_WITH_STATISTICS
7463 switch (uVector)
7464 {
7465 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
7466 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
7467 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
7468 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7469 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
7470 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
7471 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7472 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
7473 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
7474 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
7475 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
7476 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
7477 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
7478 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
7479 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
7480 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
7481 default:
7482 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
7483 break;
7484 }
7485#endif
7486
7487 /* We should never call this function for a page-fault; otherwise we'd need to pass on the fault address below. */
7488 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
7489 NOREF(uVector);
7490
7491 /* Re-inject the original exception into the guest. */
7492 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7493 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7494 return VINF_SUCCESS;
7495}
7496
7497
7498/**
7499 * VM-exit exception handler for all exceptions (except NMIs!).
7500 *
7501 * @remarks This may be called for both guests and nested-guests. Take care to not
7502 * make assumptions and avoid doing anything that is not relevant when
7503 * executing a nested-guest (e.g., Mesa driver hacks).
7504 */
7505static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7506{
7507 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
7508
7509 /*
7510 * If this VM-exit occurred while delivering an event through the guest IDT, take
7511 * action based on the return code and additional hints (e.g. for page-faults)
7512 * that will be updated in the VMX transient structure.
7513 */
7514 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
7515 if (rcStrict == VINF_SUCCESS)
7516 {
7517 /*
7518 * If an exception caused a VM-exit due to delivery of an event, the original
7519 * event may have to be re-injected into the guest. We shall reinject it and
7520 * continue guest execution. However, page-fault is a complicated case and
7521 * needs additional processing done in vmxHCExitXcptPF().
7522 */
7523 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7524 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7525 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
7526 || uVector == X86_XCPT_PF)
7527 {
7528 switch (uVector)
7529 {
7530 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7531 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7532 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7533 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7534 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7535 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7536 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7537 default:
7538 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7539 }
7540 }
7541 /* else: inject pending event before resuming guest execution. */
7542 }
7543 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7544 {
7545 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7546 rcStrict = VINF_SUCCESS;
7547 }
7548
7549 return rcStrict;
7550}
7551/** @} */
7552
7553
7554/** @name VM-exit handlers.
7555 * @{
7556 */
7557/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7558/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7559/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7560
7561/**
7562 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7563 */
7564HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7565{
7566 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7567 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7568
7569#ifndef IN_NEM_DARWIN
7570 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7571 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7572 return VINF_SUCCESS;
7573 return VINF_EM_RAW_INTERRUPT;
7574#else
7575 return VINF_SUCCESS;
7576#endif
7577}
7578
7579
7580/**
7581 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7582 * VM-exit.
7583 */
7584HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7585{
7586 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7587 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7588
7589 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
7590
7591 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7592 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7593 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7594
7595 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7596 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7597 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7598 NOREF(pVmcsInfo);
7599
7600 VBOXSTRICTRC rcStrict;
7601 switch (uExitIntType)
7602 {
7603#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7604 /*
7605 * Host physical NMIs:
7606 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7607 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7608 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7609 *
7610 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7611 * See Intel spec. 27.5.5 "Updating Non-Register State".
7612 */
7613 case VMX_EXIT_INT_INFO_TYPE_NMI:
7614 {
7615 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7616 break;
7617 }
7618#endif
7619
7620 /*
7621 * Privileged software exceptions (#DB from ICEBP),
7622 * Software exceptions (#BP and #OF),
7623 * Hardware exceptions:
7624 * Process the required exceptions and resume guest execution if possible.
7625 */
7626 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7627 Assert(uVector == X86_XCPT_DB);
7628 RT_FALL_THRU();
7629 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7630 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7631 RT_FALL_THRU();
7632 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7633 {
7634 NOREF(uVector);
7635 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
7636 | HMVMX_READ_EXIT_INSTR_LEN
7637 | HMVMX_READ_IDT_VECTORING_INFO
7638 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
7639 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7640 break;
7641 }
7642
7643 default:
7644 {
7645 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7646 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7647 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7648 break;
7649 }
7650 }
7651
7652 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7653 return rcStrict;
7654}
7655
7656
7657/**
7658 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7659 */
7660HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7661{
7662 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7663
7664 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7665 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7666 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7667
7668 /* Evaluate and deliver pending events and resume guest execution. */
7669 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7670 return VINF_SUCCESS;
7671}
7672
7673
7674/**
7675 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7676 */
7677HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7678{
7679 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7680
7681 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7682 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7683 {
7684 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7685 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7686 }
7687
7688 Assert(!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx));
7689
7690 /*
7691 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7692 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7693 */
7694 uint32_t fIntrState;
7695 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7696 AssertRC(rc);
7697 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7698 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7699 {
7700 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
7701
7702 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7703 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7704 AssertRC(rc);
7705 }
7706
7707 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7708 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7709
7710 /* Evaluate and deliver pending events and resume guest execution. */
7711 return VINF_SUCCESS;
7712}
7713
7714
7715/**
7716 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7717 */
7718HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7719{
7720 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7721 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7722}
7723
7724
7725/**
7726 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7727 */
7728HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7729{
7730 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7731 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7732}
7733
7734
7735/**
7736 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7737 */
7738HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7739{
7740 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7741
7742 /*
7743 * Get the state we need and update the exit history entry.
7744 */
7745 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7746 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7747 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7748 AssertRCReturn(rc, rc);
7749
7750 VBOXSTRICTRC rcStrict;
7751 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7752 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7753 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
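 /* EMHistoryUpdateFlagsAndTypeAndPC() returns an exit record only when this RIP has been
    exiting frequently enough to be worth probing; in that case the work is handed to
    EMHistoryExec() below instead of plain single-instruction emulation. */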
7754 if (!pExitRec)
7755 {
7756 /*
7757 * Regular CPUID instruction execution.
7758 */
7759 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7760 if (rcStrict == VINF_SUCCESS)
7761 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7762 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7763 {
7764 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7765 rcStrict = VINF_SUCCESS;
7766 }
7767 }
7768 else
7769 {
7770 /*
7771 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7772 */
7773 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7774 IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7775 AssertRCReturn(rc2, rc2);
7776
7777 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7778 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7779
7780 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7781 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7782
7783 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7784 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7785 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7786 }
7787 return rcStrict;
7788}
7789
7790
7791/**
7792 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7793 */
7794HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7795{
7796 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7797
7798 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7799 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7800 AssertRCReturn(rc, rc);
7801
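 /* GETSEC raises #UD when CR4.SMXE is clear and otherwise VM-exits unconditionally from
    VMX non-root operation, so SMXE should always be set here; defer the instruction to
    the interpreter (VINF_EM_RAW_EMULATE_INSTR). */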
7802 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7803 return VINF_EM_RAW_EMULATE_INSTR;
7804
7805 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7806 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7807}
7808
7809
7810/**
7811 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7812 */
7813HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7814{
7815 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7816
7817 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7818 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7819 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7820 AssertRCReturn(rc, rc);
7821
7822 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7823 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7824 {
7825 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7826 we must reset offsetting on VM-entry. See @bugref{6634}. */
7827 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7828 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7829 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7830 }
7831 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7832 {
7833 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7834 rcStrict = VINF_SUCCESS;
7835 }
7836 return rcStrict;
7837}
7838
7839
7840/**
7841 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7842 */
7843HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7844{
7845 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7846
7847 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7848 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7849 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX>(pVCpu, pVmcsInfo, __FUNCTION__);
7850 AssertRCReturn(rc, rc);
7851
7852 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7853 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7854 {
7855 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7856 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7857 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7858 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7859 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7860 }
7861 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7862 {
7863 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7864 rcStrict = VINF_SUCCESS;
7865 }
7866 return rcStrict;
7867}
7868
7869
7870/**
7871 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7872 */
7873HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7874{
7875 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7876
7877 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7878 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7879 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7880 AssertRCReturn(rc, rc);
7881
7882 VBOXSTRICTRC rcStrict = IEMExecDecodedRdpmc(pVCpu, pVmxTransient->cbExitInstr);
7883 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7884 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7885 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7886 {
7887 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7888 rcStrict = VINF_SUCCESS;
7889 }
7890 return rcStrict;
7891}
7892
7893
7894/**
7895 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7896 */
7897HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7898{
7899 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7900
7901 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7902 if (EMAreHypercallInstructionsEnabled(pVCpu))
7903 {
7904 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7905 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RIP
7906 | CPUMCTX_EXTRN_RFLAGS
7907 | CPUMCTX_EXTRN_CR0
7908 | CPUMCTX_EXTRN_SS
7909 | CPUMCTX_EXTRN_CS
7910 | CPUMCTX_EXTRN_EFER>(pVCpu, pVmcsInfo, __FUNCTION__);
7911 AssertRCReturn(rc, rc);
7912
7913 /* Perform the hypercall. */
7914 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7915 if (rcStrict == VINF_SUCCESS)
7916 {
7917 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7918 AssertRCReturn(rc, rc);
7919 }
7920 else
7921 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7922 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7923 || RT_FAILURE(rcStrict));
7924
7925 /* If the hypercall changes anything other than guest's general-purpose registers,
7926 we would need to reload the guest changed bits here before VM-entry. */
7927 }
7928 else
7929 Log4Func(("Hypercalls not enabled\n"));
7930
7931 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7932 if (RT_FAILURE(rcStrict))
7933 {
7934 vmxHCSetPendingXcptUD(pVCpu);
7935 rcStrict = VINF_SUCCESS;
7936 }
7937
7938 return rcStrict;
7939}
7940
7941
7942/**
7943 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7944 */
7945HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7946{
7947 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7948#ifndef IN_NEM_DARWIN
7949 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7950#endif
7951
7952 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7953 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
7954 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7955 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7956 AssertRCReturn(rc, rc);
7957
7958 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7959
7960 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7961 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7962 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7963 {
7964 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7965 rcStrict = VINF_SUCCESS;
7966 }
7967 else
7968 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7969 VBOXSTRICTRC_VAL(rcStrict)));
7970 return rcStrict;
7971}
7972
7973
7974/**
7975 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7976 */
7977HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7978{
7979 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7980
7981 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7982 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7983 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS>(pVCpu, pVmcsInfo, __FUNCTION__);
7984 AssertRCReturn(rc, rc);
7985
7986 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7987 if (rcStrict == VINF_SUCCESS)
7988 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7989 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7990 {
7991 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7992 rcStrict = VINF_SUCCESS;
7993 }
7994
7995 return rcStrict;
7996}
7997
7998
7999/**
8000 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
8001 */
8002HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8003{
8004 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8005
8006 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8007 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8008 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8009 AssertRCReturn(rc, rc);
8010
8011 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
8012 if (RT_SUCCESS(rcStrict))
8013 {
8014 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8015 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
8016 rcStrict = VINF_SUCCESS;
8017 }
8018
8019 return rcStrict;
8020}
8021
8022
8023/**
8024 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
8025 * VM-exit.
8026 */
8027HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8028{
8029 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8030 return VINF_EM_RESET;
8031}
8032
8033
8034/**
8035 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
8036 */
8037HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8038{
8039 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8040
8041 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8042 AssertRCReturn(rc, rc);
8043
8044 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
8045 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
8046 rc = VINF_SUCCESS;
8047 else
8048 rc = VINF_EM_HALT;
8049
8050 if (rc != VINF_SUCCESS)
8051 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
8052 return rc;
8053}
8054
8055
8056#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8057/**
8058 * VM-exit handler for instructions that result in a \#UD exception delivered to
8059 * the guest.
8060 */
8061HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8062{
8063 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8064 vmxHCSetPendingXcptUD(pVCpu);
8065 return VINF_SUCCESS;
8066}
8067#endif
8068
8069
8070/**
8071 * VM-exit handler for expiry of the VMX-preemption timer.
8072 */
8073HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8074{
8075 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8076
8077 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
8078 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8079 Log12(("vmxHCExitPreemptTimer:\n"));
8080
8081 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
8082 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8083 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
8084 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
8085 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
8086}
8087
8088
8089/**
8090 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
8091 */
8092HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8093{
8094 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8095
8096 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8097 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8098 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
8099 AssertRCReturn(rc, rc);
8100
8101 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
8102 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
8103 : HM_CHANGED_RAISED_XCPT_MASK);
8104
8105#ifndef IN_NEM_DARWIN
8106 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
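 /* The guest may just have changed XCR0, so re-evaluate whether XCR0 must be swapped
    around VM-entry/exit: only needed when CR4.OSXSAVE is set and the guest XCR0 differs
    from the host value. */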
8107 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
8108 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
8109 {
8110 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
8111 hmR0VmxUpdateStartVmFunction(pVCpu);
8112 }
8113#endif
8114
8115 return rcStrict;
8116}
8117
8118
8119/**
8120 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
8121 */
8122HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8123{
8124 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8125
8126 /** @todo Enable the new code after finding a reliable guest test-case. */
8127#if 1
8128 return VERR_EM_INTERPRETER;
8129#else
8130 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8131 | HMVMX_READ_EXIT_INSTR_INFO
8132 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8133 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8134 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8135 AssertRCReturn(rc, rc);
8136
8137 /* Paranoia. Ensure this has a memory operand. */
8138 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
8139
8140 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
8141 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
8142 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
8143 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
8144
8145 RTGCPTR GCPtrDesc;
8146 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
8147
8148 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
8149 GCPtrDesc, uType);
8150 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8151 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8152 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8153 {
8154 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8155 rcStrict = VINF_SUCCESS;
8156 }
8157 return rcStrict;
8158#endif
8159}
8160
8161
8162/**
8163 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
8164 * VM-exit.
8165 */
8166HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8167{
8168 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8169 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8170 AssertRCReturn(rc, rc);
8171
8172 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
8173 if (RT_FAILURE(rc))
8174 return rc;
8175
8176 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
8177 NOREF(uInvalidReason);
8178
8179#ifdef VBOX_STRICT
8180 uint32_t fIntrState;
8181 uint64_t u64Val;
8182 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_INFO
8183 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8184 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
8185
8186 Log4(("uInvalidReason %u\n", uInvalidReason));
8187 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
8188 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
8189 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
8190
8191 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
8192 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
8193 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
8194 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
8195 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
8196 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
8197 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
8198 Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
8199 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
8200 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
8201 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
8202 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8203# ifndef IN_NEM_DARWIN
8204 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
8205 {
8206 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
8207 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
8208 }
8209
8210 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
8211# endif
8212#endif
8213
8214 return VERR_VMX_INVALID_GUEST_STATE;
8215}
8216
8217/**
8218 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
8219 */
8220HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8221{
8222 /*
8223 * Cumulative notes of all recognized but unexpected VM-exits.
8224 *
8225 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
8226 * nested-paging is used.
8227 *
8228 * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
8229 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
8230 * this function (and thereby stopping VM execution) for handling such instructions.
8231 *
8232 *
8233 * VMX_EXIT_INIT_SIGNAL:
8234 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
8235 * They are -NOT- blocked in VMX non-root operation, so we can, in theory, still get these
8236 * VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
8237 *
8238 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
8239 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
8240 * See Intel spec. "23.8 Restrictions on VMX operation".
8241 *
8242 * VMX_EXIT_SIPI:
8243 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
8244 * activity state is used. We don't make use of it as our guests don't have direct
8245 * access to the host local APIC.
8246 *
8247 * See Intel spec. 25.3 "Other Causes of VM-exits".
8248 *
8249 * VMX_EXIT_IO_SMI:
8250 * VMX_EXIT_SMI:
8251 * This can only happen if we support dual-monitor treatment of SMI, which can be
8252 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
8253 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
8254 * VMX root mode or receive an SMI. If we get here, something funny is going on.
8255 *
8256 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
8257 * See Intel spec. 25.3 "Other Causes of VM-Exits"
8258 *
8259 * VMX_EXIT_ERR_MSR_LOAD:
8260 * Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
8261 * and typically indicate a bug in the hypervisor code. We thus cannot resume
8262 * execution.
8263 *
8264 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
8265 *
8266 * VMX_EXIT_ERR_MACHINE_CHECK:
8267 * Machine-check exceptions indicate a fatal/unrecoverable hardware condition
8268 * including but not limited to system bus, ECC, parity, cache and TLB errors. An
8269 * abort-class #MC exception is raised. We thus cannot assume a
8270 * reasonable chance of continuing any sort of execution and we bail.
8271 *
8272 * See Intel spec. 15.1 "Machine-check Architecture".
8273 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
8274 *
8275 * VMX_EXIT_PML_FULL:
8276 * VMX_EXIT_VIRTUALIZED_EOI:
8277 * VMX_EXIT_APIC_WRITE:
8278 * We do not currently support any of these features and thus they are all unexpected
8279 * VM-exits.
8280 *
8281 * VMX_EXIT_GDTR_IDTR_ACCESS:
8282 * VMX_EXIT_LDTR_TR_ACCESS:
8283 * VMX_EXIT_RDRAND:
8284 * VMX_EXIT_RSM:
8285 * VMX_EXIT_VMFUNC:
8286 * VMX_EXIT_ENCLS:
8287 * VMX_EXIT_RDSEED:
8288 * VMX_EXIT_XSAVES:
8289 * VMX_EXIT_XRSTORS:
8290 * VMX_EXIT_UMWAIT:
8291 * VMX_EXIT_TPAUSE:
8292 * VMX_EXIT_LOADIWKEY:
8293 * These VM-exits are -not- caused unconditionally by execution of the corresponding
8294 * instruction. Any VM-exit for these instructions indicates a hardware problem,
8295 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
8296 *
8297 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
8298 */
8299 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8300 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
8301 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8302}
8303
8304
8305/**
8306 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
8307 */
8308HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8309{
8310 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8311
8312 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8313
8314 /** @todo Optimize this: We currently drag in the whole MSR state
8315 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8316 * MSRs required. That would require changes to IEM and possibly CPUM too.
8317 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8318 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8319 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8320 int rc;
8321 switch (idMsr)
8322 {
8323 default:
8324 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8325 __FUNCTION__);
8326 AssertRCReturn(rc, rc);
8327 break;
8328 case MSR_K8_FS_BASE:
8329 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8330 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8331 AssertRCReturn(rc, rc);
8332 break;
8333 case MSR_K8_GS_BASE:
8334 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8335 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8336 AssertRCReturn(rc, rc);
8337 break;
8338 }
8339
8340 Log4Func(("ecx=%#RX32\n", idMsr));
8341
8342#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8343 Assert(!pVmxTransient->fIsNestedGuest);
8344 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
8345 {
8346 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
8347 && idMsr != MSR_K6_EFER)
8348 {
8349 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
8350 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8351 }
8352 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8353 {
8354 Assert(pVmcsInfo->pvMsrBitmap);
8355 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8356 if (fMsrpm & VMXMSRPM_ALLOW_RD)
8357 {
8358 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
8359 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8360 }
8361 }
8362 }
8363#endif
8364
8365 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
8366 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
8367 if (rcStrict == VINF_SUCCESS)
8368 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8369 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8370 {
8371 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8372 rcStrict = VINF_SUCCESS;
8373 }
8374 else
8375 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
8376 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8377
8378 return rcStrict;
8379}
8380
8381
8382/**
8383 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
8384 */
8385HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8386{
8387 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8388
8389 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8390
8391 /*
8392 * The FS and GS base MSRs are not part of the above all-MSRs mask.
8393 * Although we don't need to fetch the base itself (it will be overwritten shortly),
8394 * loading the guest state later would also load the entire segment register, including
8395 * the limit and attributes, and thus we need to import them here.
8396 */
8397 /** @todo Optimize this: We currently drag in the whole MSR state
8398 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8399 * MSRs required. That would require changes to IEM and possibly CPUM too.
8400 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8401 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8402 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8403 int rc;
8404 switch (idMsr)
8405 {
8406 default:
8407 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8408 __FUNCTION__);
8409 AssertRCReturn(rc, rc);
8410 break;
8411
8412 case MSR_K8_FS_BASE:
8413 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8414 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8415 AssertRCReturn(rc, rc);
8416 break;
8417 case MSR_K8_GS_BASE:
8418 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8419 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8420 AssertRCReturn(rc, rc);
8421 break;
8422 }
8423 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
8424
8425 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
8426 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
8427
8428 if (rcStrict == VINF_SUCCESS)
8429 {
8430 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8431
8432 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
8433 if ( idMsr == MSR_IA32_APICBASE
8434 || ( idMsr >= MSR_IA32_X2APIC_START
8435 && idMsr <= MSR_IA32_X2APIC_END))
8436 {
8437 /*
8438 * We've already saved the APIC related guest-state (TPR) in post-run phase.
8439 * When full APIC register virtualization is implemented we'll have to make
8440 * sure APIC state is saved from the VMCS before IEM changes it.
8441 */
8442 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
8443 }
8444 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
8445 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8446 else if (idMsr == MSR_K6_EFER)
8447 {
8448 /*
8449 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
8450 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
8451 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
8452 */
8453 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
8454 }
8455
8456 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
8457 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
8458 {
8459 switch (idMsr)
8460 {
8461 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
8462 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
8463 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
8464 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
8465 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
8466 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
8467 default:
8468 {
8469#ifndef IN_NEM_DARWIN
8470 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8471 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
8472 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8473 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
8474#else
8475 AssertMsgFailed(("TODO\n"));
8476#endif
8477 break;
8478 }
8479 }
8480 }
8481#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8482 else
8483 {
8484 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
8485 switch (idMsr)
8486 {
8487 case MSR_IA32_SYSENTER_CS:
8488 case MSR_IA32_SYSENTER_EIP:
8489 case MSR_IA32_SYSENTER_ESP:
8490 case MSR_K8_FS_BASE:
8491 case MSR_K8_GS_BASE:
8492 {
8493 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
8494 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8495 }
8496
 8497 /* Writes to MSRs in the auto-load/store area or to lazily swapped MSRs shouldn't cause VM-exits when MSR-bitmaps are in use. */
8498 default:
8499 {
8500 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8501 {
8502 /* EFER MSR writes are always intercepted. */
8503 if (idMsr != MSR_K6_EFER)
8504 {
8505 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
8506 idMsr));
8507 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8508 }
8509 }
8510
8511 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8512 {
8513 Assert(pVmcsInfo->pvMsrBitmap);
8514 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8515 if (fMsrpm & VMXMSRPM_ALLOW_WR)
8516 {
8517 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
8518 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8519 }
8520 }
8521 break;
8522 }
8523 }
8524 }
8525#endif /* VBOX_STRICT */
8526 }
8527 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8528 {
8529 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8530 rcStrict = VINF_SUCCESS;
8531 }
8532 else
8533 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
8534 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8535
8536 return rcStrict;
8537}
8538
8539
8540/**
8541 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8542 */
8543HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8544{
8545 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8546
8547 /** @todo The guest has likely hit a contended spinlock. We might want to
 8548 * poke or schedule a different guest VCPU. */
8549 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8550 if (RT_SUCCESS(rc))
8551 return VINF_EM_RAW_INTERRUPT;
8552
8553 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8554 return rc;
8555}
8556
8557
8558/**
8559 * VM-exit handler for when the TPR value is lowered below the specified
8560 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8561 */
8562HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8563{
8564 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8565 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8566
8567 /*
8568 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8569 * We'll re-evaluate pending interrupts and inject them before the next VM
8570 * entry so we can just continue execution here.
8571 */
8572 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8573 return VINF_SUCCESS;
8574}
8575
8576
8577/**
8578 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8579 * VM-exit.
8580 *
8581 * @retval VINF_SUCCESS when guest execution can continue.
8582 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8583 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8584 * incompatible guest state for VMX execution (real-on-v86 case).
8585 */
8586HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8587{
8588 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8589 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8590
8591 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8592 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8593 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8594
8595 VBOXSTRICTRC rcStrict;
8596 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8597 uint64_t const uExitQual = pVmxTransient->uExitQual;
8598 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
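 /* The access-type field of the exit qualification tells us which of the four CR access forms
    caused this exit: MOV to CRx, MOV from CRx, CLTS or LMSW; each has its own case below. */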
8599 switch (uAccessType)
8600 {
8601 /*
8602 * MOV to CRx.
8603 */
8604 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8605 {
8606 /*
8607 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8608 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
8609 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8610 * PAE PDPTEs as well.
8611 */
8612 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8613 AssertRCReturn(rc, rc);
8614
8615 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8616#ifndef IN_NEM_DARWIN
8617 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8618#endif
8619 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8620 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8621
8622 /*
 8623 * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8624 * - When nested paging isn't used.
8625 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8626 * - We are executing in the VM debug loop.
8627 */
8628#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8629# ifndef IN_NEM_DARWIN
8630 Assert( iCrReg != 3
8631 || !VM_IS_VMX_NESTED_PAGING(pVM)
8632 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8633 || pVCpu->hmr0.s.fUsingDebugLoop);
8634# else
8635 Assert( iCrReg != 3
8636 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8637# endif
8638#endif
8639
8640 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8641 Assert( iCrReg != 8
8642 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8643
8644 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8645 AssertMsg( rcStrict == VINF_SUCCESS
8646 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8647
8648#ifndef IN_NEM_DARWIN
8649 /*
8650 * This is a kludge for handling switches back to real mode when we try to use
8651 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8652 * deal with special selector values, so we have to return to ring-3 and run
8653 * there till the selector values are V86 mode compatible.
8654 *
8655 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8656 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8657 * this function.
8658 */
8659 if ( iCrReg == 0
8660 && rcStrict == VINF_SUCCESS
8661 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8662 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8663 && (uOldCr0 & X86_CR0_PE)
8664 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8665 {
8666 /** @todo Check selectors rather than returning all the time. */
8667 Assert(!pVmxTransient->fIsNestedGuest);
8668 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8669 rcStrict = VINF_EM_RESCHEDULE_REM;
8670 }
8671#endif
8672
8673 break;
8674 }
8675
8676 /*
8677 * MOV from CRx.
8678 */
8679 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8680 {
8681 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8682 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8683
8684 /*
 8685 * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8686 * - When nested paging isn't used.
8687 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8688 * - We are executing in the VM debug loop.
8689 */
8690#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8691# ifndef IN_NEM_DARWIN
8692 Assert( iCrReg != 3
8693 || !VM_IS_VMX_NESTED_PAGING(pVM)
8694 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8695 || pVCpu->hmr0.s.fLeaveDone);
8696# else
8697 Assert( iCrReg != 3
8698 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8699# endif
8700#endif
8701
8702 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8703 Assert( iCrReg != 8
8704 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8705
8706 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8707 break;
8708 }
8709
8710 /*
8711 * CLTS (Clear Task-Switch Flag in CR0).
8712 */
8713 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8714 {
8715 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8716 break;
8717 }
8718
8719 /*
8720 * LMSW (Load Machine-Status Word into CR0).
8721 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8722 */
8723 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8724 {
8725 RTGCPTR GCPtrEffDst;
8726 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8727 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8728 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8729 if (fMemOperand)
8730 {
8731 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
8732 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8733 }
8734 else
8735 GCPtrEffDst = NIL_RTGCPTR;
8736 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8737 break;
8738 }
8739
8740 default:
8741 {
8742 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8743 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8744 }
8745 }
8746
8747 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8748 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8749 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8750
8751 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8752 NOREF(pVM);
8753 return rcStrict;
8754}
8755
8756
8757/**
8758 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8759 * VM-exit.
8760 */
8761HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8762{
8763 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8764 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8765
8766 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8767 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8768 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8769 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8770#define VMX_HC_EXIT_IO_INSTR_INITIAL_REGS (IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER)
 8771 /* The EFER MSR is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8772 int rc = vmxHCImportGuestState<VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8773 AssertRCReturn(rc, rc);
8774
8775 /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
8776 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8777 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8778 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8779 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8780 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8781 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8782 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8783
8784 /*
8785 * Update exit history to see if this exit can be optimized.
8786 */
8787 VBOXSTRICTRC rcStrict;
8788 PCEMEXITREC pExitRec = NULL;
8789 if ( !fGstStepping
8790 && !fDbgStepping)
8791 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8792 !fIOString
8793 ? !fIOWrite
8794 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8795 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8796 : !fIOWrite
8797 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8798 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8799 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8800 if (!pExitRec)
8801 {
8802 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8803 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
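 /* Index 2 of these tables is a dummy: the size-of-access field in the I/O exit qualification
    only encodes 0 (1 byte), 1 (2 bytes) and 3 (4 bytes), which is why uIOSize == 2 is rejected
    by the AssertReturn above. */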
8804
8805 uint32_t const cbValue = s_aIOSizes[uIOSize];
8806 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8807 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8808 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8809 if (fIOString)
8810 {
8811 /*
8812 * INS/OUTS - I/O String instruction.
8813 *
8814 * Use instruction-information if available, otherwise fall back on
8815 * interpreting the instruction.
8816 */
8817 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8818 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8819 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8820 if (fInsOutsInfo)
8821 {
8822 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
8823 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8824 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8825 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8826 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8827 if (fIOWrite)
8828 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8829 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8830 else
8831 {
8832 /*
8833 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8834 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8835 * See Intel Instruction spec. for "INS".
8836 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8837 */
8838 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8839 }
8840 }
8841 else
8842 rcStrict = IEMExecOne(pVCpu);
8843
8844 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8845 fUpdateRipAlready = true;
8846 }
8847 else
8848 {
8849 /*
8850 * IN/OUT - I/O instruction.
8851 */
8852 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8853 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8854 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8855 if (fIOWrite)
8856 {
8857 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8858 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8859#ifndef IN_NEM_DARWIN
8860 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8861 && !pCtx->eflags.Bits.u1TF)
8862 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8863#endif
8864 }
8865 else
8866 {
8867 uint32_t u32Result = 0;
8868 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8869 if (IOM_SUCCESS(rcStrict))
8870 {
8871 /* Save result of I/O IN instr. in AL/AX/EAX. */
8872 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8873 }
8874#ifndef IN_NEM_DARWIN
8875 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8876 && !pCtx->eflags.Bits.u1TF)
8877 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8878#endif
8879 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8880 }
8881 }
8882
8883 if (IOM_SUCCESS(rcStrict))
8884 {
8885 if (!fUpdateRipAlready)
8886 {
8887 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8888 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8889 }
8890
8891 /*
 8892 * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault guru
 8893 * meditation while booting a Fedora 17 64-bit guest.
8894 *
8895 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8896 */
8897 if (fIOString)
8898 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8899
8900 /*
8901 * If any I/O breakpoints are armed, we need to check if one triggered
8902 * and take appropriate action.
8903 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8904 */
8905#if 1
8906 AssertCompile(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7);
8907#else
8908 AssertCompile(!(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7));
8909 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo);
8910 AssertRCReturn(rc, rc);
8911#endif
8912
8913 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8914 * execution engines about whether hyper BPs and such are pending. */
8915 uint32_t const uDr7 = pCtx->dr[7];
8916 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8917 && X86_DR7_ANY_RW_IO(uDr7)
8918 && (pCtx->cr4 & X86_CR4_DE))
8919 || DBGFBpIsHwIoArmed(pVM)))
8920 {
8921 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8922
8923#ifndef IN_NEM_DARWIN
8924 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8925 VMMRZCallRing3Disable(pVCpu);
8926 HM_DISABLE_PREEMPT(pVCpu);
8927
8928 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8929
8930 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8931 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8932 {
8933 /* Raise #DB. */
8934 if (fIsGuestDbgActive)
8935 ASMSetDR6(pCtx->dr[6]);
8936 if (pCtx->dr[7] != uDr7)
8937 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8938
8939 vmxHCSetPendingXcptDB(pVCpu);
8940 }
8941 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8942 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8943 else if ( rcStrict2 != VINF_SUCCESS
8944 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8945 rcStrict = rcStrict2;
8946 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8947
8948 HM_RESTORE_PREEMPT();
8949 VMMRZCallRing3Enable(pVCpu);
8950#else
8951 /** @todo */
8952#endif
8953 }
8954 }
8955
8956#ifdef VBOX_STRICT
8957 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8958 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8959 Assert(!fIOWrite);
8960 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8961 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8962 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8963 Assert(fIOWrite);
8964 else
8965 {
8966# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8967 * statuses, that the VMM device and some others may return. See
8968 * IOM_SUCCESS() for guidance. */
8969 AssertMsg( RT_FAILURE(rcStrict)
8970 || rcStrict == VINF_SUCCESS
8971 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8972 || rcStrict == VINF_EM_DBG_BREAKPOINT
8973 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8974 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8975# endif
8976 }
8977#endif
8978 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8979 }
8980 else
8981 {
8982 /*
8983 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8984 */
8985 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
8986 VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8987 AssertRCReturn(rc2, rc2);
8988 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8989 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8990 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8991 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8992 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8993 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8994
8995 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8996 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8997
8998 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8999 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9000 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9001 }
9002 return rcStrict;
9003}
9004
9005
9006/**
9007 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
9008 * VM-exit.
9009 */
9010HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9011{
9012 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9013
 9014 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
9015 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9016 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
9017 {
9018 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_INFO>(pVCpu, pVmxTransient);
9019 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9020 {
9021 uint32_t uErrCode;
9022 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
9023 {
9024 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9025 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
9026 }
9027 else
9028 uErrCode = 0;
9029
9030 RTGCUINTPTR GCPtrFaultAddress;
9031 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
9032 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
9033 else
9034 GCPtrFaultAddress = 0;
9035
9036 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9037
9038 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
9039 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
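 /* The event that was being delivered when the task switch was intercepted is queued again
    here (converted from the IDT-vectoring information) so it isn't lost; the
    VINF_EM_RAW_INJECT_TRPM_EVENT returned below presumably lets it be re-injected via TRPM. */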
9040
9041 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
9042 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
9043 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9044 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9045 }
9046 }
9047
9048 /* Fall back to the interpreter to emulate the task-switch. */
9049 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9050 return VERR_EM_INTERPRETER;
9051}
9052
9053
9054/**
9055 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
9056 */
9057HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9058{
9059 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9060
9061 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9062 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
9063 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9064 AssertRC(rc);
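 /* The monitor-trap-flag control is presumably only armed while single-stepping the guest, so
    disarm it in both the cached and the hardware VMCS and report the completed step to the
    debugger. */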
9065 return VINF_EM_DBG_STEPPED;
9066}
9067
9068
9069/**
9070 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
9071 */
9072HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9073{
9074 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9075 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
9076
9077 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9078 | HMVMX_READ_EXIT_INSTR_LEN
9079 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9080 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9081 | HMVMX_READ_IDT_VECTORING_INFO
9082 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9083
9084 /*
9085 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9086 */
9087 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9088 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9089 {
9090 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
9091 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
9092 {
9093 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9094 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9095 }
9096 }
9097 else
9098 {
9099 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9100 return rcStrict;
9101 }
9102
 9103 /* IOMR0MmioPhysHandler() below may call into IEM; save the necessary state. */
9104 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9105 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9106 AssertRCReturn(rc, rc);
9107
 9108 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
9109 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
9110 switch (uAccessType)
9111 {
9112#ifndef IN_NEM_DARWIN
9113 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
9114 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
9115 {
9116 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
9117 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
9118 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
9119
9120 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
9121 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
9122 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
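 /* I.e. the faulting guest-physical address is reconstructed from the page-aligned APIC base
    plus the access offset reported in the exit qualification. */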
9123 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
9124 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
9125
9126 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
9127 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
9128 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9129 if ( rcStrict == VINF_SUCCESS
9130 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9131 || rcStrict == VERR_PAGE_NOT_PRESENT)
9132 {
9133 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9134 | HM_CHANGED_GUEST_APIC_TPR);
9135 rcStrict = VINF_SUCCESS;
9136 }
9137 break;
9138 }
9139#else
9140 /** @todo */
9141#endif
9142
9143 default:
9144 {
9145 Log4Func(("uAccessType=%#x\n", uAccessType));
9146 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
9147 break;
9148 }
9149 }
9150
9151 if (rcStrict != VINF_SUCCESS)
9152 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
9153 return rcStrict;
9154}
9155
9156
9157/**
9158 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
9159 * VM-exit.
9160 */
9161HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9162{
9163 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9164 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9165
9166 /*
9167 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
9168 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
9169 * must emulate the MOV DRx access.
9170 */
9171 if (!pVmxTransient->fIsNestedGuest)
9172 {
9173 /* We should -not- get this VM-exit if the guest's debug registers were active. */
9174 if ( pVmxTransient->fWasGuestDebugStateActive
9175#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9176 && !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx
9177#endif
9178 )
9179 {
9180 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
9181 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
9182 }
9183
9184 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
9185 && !pVmxTransient->fWasHyperDebugStateActive)
9186 {
9187 Assert(!DBGFIsStepping(pVCpu));
9188 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
9189
9190 /* Whether we disable intercepting MOV DRx instructions and resume
9191 the current one, or emulate it and keep intercepting them is
9192 configurable. Though it usually comes down to whether there are
9193 any new DR6 & DR7 bits (RTM) we want to hide from the guest. */
9194#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9195 bool const fResumeInstruction = !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx;
9196#else
9197 bool const fResumeInstruction = true;
9198#endif
9199 if (fResumeInstruction)
9200 {
9201 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
9202 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9203 AssertRC(rc);
9204 }
9205
9206#ifndef IN_NEM_DARWIN
9207 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
9208 VMMRZCallRing3Disable(pVCpu);
9209 HM_DISABLE_PREEMPT(pVCpu);
9210
9211 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
9212 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
9213 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9214
9215 HM_RESTORE_PREEMPT();
9216 VMMRZCallRing3Enable(pVCpu);
9217#else
9218 CPUMR3NemActivateGuestDebugState(pVCpu);
9219 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9220 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
9221#endif
9222
9223 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
9224 if (fResumeInstruction)
9225 {
9226#ifdef VBOX_WITH_STATISTICS
9227 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9228 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9229 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9230 else
9231 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9232#endif
9233 return VINF_SUCCESS;
9234 }
9235 }
9236 }
9237
9238 /*
9239 * Import state. We must have DR7 loaded here as it's always consulted,
9240 * both for reading and writing. The other debug registers are never
9241 * exported as such.
9242 */
9243 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9244 int rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
9245 | CPUMCTX_EXTRN_GPRS_MASK
9246 | CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo, __FUNCTION__);
9247 AssertRCReturn(rc, rc);
9248
9249 uint8_t const iGReg = VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual);
9250 uint8_t const iDrReg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
9251 Log4Func(("cs:rip=%#04x:%08RX64 r%d %s dr%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iGReg,
9252 VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE ? "->" : "<-", iDrReg));
9253
9254 VBOXSTRICTRC rcStrict;
9255 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9256 {
9257 /*
9258 * Write DRx register.
9259 */
9260 rcStrict = IEMExecDecodedMovDRxWrite(pVCpu, pVmxTransient->cbExitInstr, iDrReg, iGReg);
9261 AssertMsg( rcStrict == VINF_SUCCESS
9262 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9263
9264 if (rcStrict == VINF_SUCCESS)
9265 {
9266 /** @todo r=bird: Not sure why we always flag DR7 as modified here, but I've
9267 * kept it for now to avoid breaking something non-obvious. */
9268 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9269 | HM_CHANGED_GUEST_DR7);
9270 /* Update the DR6 register if guest debug state is active, otherwise we'll
9271 trash it when calling CPUMR0DebugStateMaybeSaveGuestAndRestoreHost. */
9272 if (iDrReg == 6 && CPUMIsGuestDebugStateActive(pVCpu))
9273 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
9274 Log4Func(("r%d=%#RX64 => dr%d=%#RX64\n", iGReg, pVCpu->cpum.GstCtx.aGRegs[iGReg].u,
9275 iDrReg, pVCpu->cpum.GstCtx.dr[iDrReg]));
9276 }
9277 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9278 {
9279 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9280 rcStrict = VINF_SUCCESS;
9281 }
9282
9283 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9284 }
9285 else
9286 {
9287 /*
9288 * Read DRx register into a general purpose register.
9289 */
9290 rcStrict = IEMExecDecodedMovDRxRead(pVCpu, pVmxTransient->cbExitInstr, iGReg, iDrReg);
9291 AssertMsg( rcStrict == VINF_SUCCESS
9292 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9293
9294 if (rcStrict == VINF_SUCCESS)
9295 {
9296 if (iGReg == X86_GREG_xSP)
9297 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9298 | HM_CHANGED_GUEST_RSP);
9299 else
9300 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9301 }
9302 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9303 {
9304 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9305 rcStrict = VINF_SUCCESS;
9306 }
9307
9308 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9309 }
9310
9311 return rcStrict;
9312}
9313
9314
9315/**
9316 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
9317 * Conditional VM-exit.
9318 */
9319HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9320{
9321 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9322
9323#ifndef IN_NEM_DARWIN
9324 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9325
9326 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_LEN
9327 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9328 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9329 | HMVMX_READ_IDT_VECTORING_INFO
9330 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9331 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9332
9333 /*
9334 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9335 */
9336 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9337 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9338 {
9339 /*
9340 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
9341 * instruction emulation to inject the original event. Otherwise, injecting the original event
9342 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
9343 */
9344 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9345 { /* likely */ }
9346 else
9347 {
9348 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9349# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9350 /** @todo NSTVMX: Think about how this should be handled. */
9351 if (pVmxTransient->fIsNestedGuest)
9352 return VERR_VMX_IPE_3;
9353# endif
9354 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9355 }
9356 }
9357 else
9358 {
9359 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9360 return rcStrict;
9361 }
9362
9363 /*
9364 * Get sufficient state and update the exit history entry.
9365 */
9366 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9367 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9368 AssertRCReturn(rc, rc);
9369
9370 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9371 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
9372 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
9373 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
9374 if (!pExitRec)
9375 {
9376 /*
9377 * If we succeed, resume guest execution.
9378 * If we fail in interpreting the instruction because we couldn't get the guest physical address
9379 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
9380 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
9381 * weird case. See @bugref{6043}.
9382 */
9383 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9384/** @todo bird: We can probably just go straight to IOM here and assume that
9385 * it's MMIO, then fall back on PGM if that hunch didn't work out so
 9386 * well. However, we need to address the aliasing workarounds that
 9387 * PGMR0Trap0eHandlerNPMisconfig implements, so some care is needed.
9388 *
9389 * Might also be interesting to see if we can get this done more or
9390 * less locklessly inside IOM. Need to consider the lookup table
 9391 * updating and its use a bit more carefully first (or do all updates via
9392 * rendezvous) */
9393 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, &pVCpu->cpum.GstCtx, GCPhys, UINT32_MAX);
9394 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
9395 if ( rcStrict == VINF_SUCCESS
9396 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9397 || rcStrict == VERR_PAGE_NOT_PRESENT)
9398 {
9399 /* Successfully handled MMIO operation. */
9400 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9401 | HM_CHANGED_GUEST_APIC_TPR);
9402 rcStrict = VINF_SUCCESS;
9403 }
9404 }
9405 else
9406 {
9407 /*
9408 * Frequent exit or something needing probing. Call EMHistoryExec.
9409 */
9410 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
9411 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
9412
9413 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9414 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9415
9416 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9417 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9418 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9419 }
9420 return rcStrict;
9421#else
9422 AssertFailed();
9423 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
9424#endif
9425}
9426
9427
9428/**
9429 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
9430 * VM-exit.
9431 */
9432HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9433{
9434 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9435#ifndef IN_NEM_DARWIN
9436 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9437
9438 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9439 | HMVMX_READ_EXIT_INSTR_LEN
9440 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9441 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9442 | HMVMX_READ_IDT_VECTORING_INFO
9443 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9444 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9445
9446 /*
9447 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9448 */
9449 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9450 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9451 {
9452 /*
9453 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
9454 * we shall resolve the nested #PF and re-inject the original event.
9455 */
9456 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9457 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
9458 }
9459 else
9460 {
9461 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9462 return rcStrict;
9463 }
9464
9465 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9466 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9467 AssertRCReturn(rc, rc);
9468
9469 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9470 uint64_t const uExitQual = pVmxTransient->uExitQual;
9471 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
9472
9473 RTGCUINT uErrorCode = 0;
9474 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
9475 uErrorCode |= X86_TRAP_PF_ID;
9476 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9477 uErrorCode |= X86_TRAP_PF_RW;
9478 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
9479 uErrorCode |= X86_TRAP_PF_P;
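 /* The EPT-violation qualification bits are translated into a #PF-style error code here:
    instruction fetch -> ID, write access -> RW, and any read/write/execute permission set in
    the EPT entry means a translation was present -> P. This synthesized error code is what is
    handed to TRPM/PGM below. */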
9480
9481 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9482 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
9483
9484 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9485
9486 /*
9487 * Handle the pagefault trap for the nested shadow table.
9488 */
9489 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
9490 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, pCtx, GCPhys);
9491 TRPMResetTrap(pVCpu);
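 /* The synthesized #PF is only asserted in TRPM for the duration of the
    PGMR0Trap0eHandlerNestedPaging call and is reset straight afterwards. */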
9492
9493 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
9494 if ( rcStrict == VINF_SUCCESS
9495 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9496 || rcStrict == VERR_PAGE_NOT_PRESENT)
9497 {
9498 /* Successfully synced our nested page tables. */
9499 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
9500 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
9501 return VINF_SUCCESS;
9502 }
9503 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9504 return rcStrict;
9505
9506#else /* IN_NEM_DARWIN */
9507 PVM pVM = pVCpu->CTX_SUFF(pVM);
9508 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
9509 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9510 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9511 vmxHCImportGuestRip(pVCpu);
9512 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
9513
9514 /*
9515 * Ask PGM for information about the given GCPhys. We need to check if we're
9516 * out of sync first.
9517 */
9518 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE),
9519 false,
9520 false };
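 /* The first field tells the checker callback whether this was a write access; judging by the
    usage below, the remaining two (fDidSomething/fCanResume) start out clear and are filled in
    by nemR3DarwinHandleMemoryAccessPageCheckerCallback. */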
9521 PGMPHYSNEMPAGEINFO Info;
9522 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
9523 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
9524 if (RT_SUCCESS(rc))
9525 {
9526 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9527 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
9528 {
9529 if (State.fCanResume)
9530 {
9531 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
9532 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9533 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9534 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9535 State.fDidSomething ? "" : " no-change"));
9536 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
9537 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9538 return VINF_SUCCESS;
9539 }
9540 }
9541
9542 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
9543 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9544 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9545 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9546 State.fDidSomething ? "" : " no-change"));
9547 }
9548 else
9549 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
9550 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9551 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
9552
9553 /*
9554 * Emulate the memory access, either access handler or special memory.
9555 */
9556 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
9557 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9558 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
9559 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
9560 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9561
9562 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9563 AssertRCReturn(rc, rc);
9564
9565 VBOXSTRICTRC rcStrict;
9566 if (!pExitRec)
9567 rcStrict = IEMExecOne(pVCpu);
9568 else
9569 {
9570 /* Frequent access or probing. */
9571 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9572 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9573 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9574 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9575 }
9576
9577 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9578
9579 Log4Func(("EPT return rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9580 return rcStrict;
9581#endif /* IN_NEM_DARWIN */
9582}
9583
9584#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9585
9586/**
9587 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
9588 */
9589HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9590{
9591 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9592
9593 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9594 | HMVMX_READ_EXIT_INSTR_INFO
9595 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9596 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9597 | CPUMCTX_EXTRN_SREG_MASK
9598 | CPUMCTX_EXTRN_HWVIRT
9599 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9600 AssertRCReturn(rc, rc);
9601
9602 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9603
9604 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9605 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
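 /* This is the common pattern for the nested VMX instruction exits below: build a VMXVEXITINFO
    from the transient state, decode the instruction's memory operand from the
    instruction-information and exit qualification, hand the pre-decoded instruction to IEM, and
    on success mark RIP/RFLAGS (and HWVIRT state where applicable) as needing re-export. */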
9606
9607 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
9608 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9609 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9610 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9611 {
9612 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9613 rcStrict = VINF_SUCCESS;
9614 }
9615 return rcStrict;
9616}
9617
9618
9619/**
9620 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9621 */
9622HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9623{
9624 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9625
9626 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
9627 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9628 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9629 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9630 AssertRCReturn(rc, rc);
9631
9632 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9633
9634 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9635 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9636 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9637 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9638 {
9639 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9640 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9641 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9642 }
9643 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9644 return rcStrict;
9645}
9646
9647
9648/**
9649 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9650 */
9651HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9652{
9653 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9654
9655 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9656 | HMVMX_READ_EXIT_INSTR_INFO
9657 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9658 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9659 | CPUMCTX_EXTRN_SREG_MASK
9660 | CPUMCTX_EXTRN_HWVIRT
9661 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9662 AssertRCReturn(rc, rc);
9663
9664 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9665
9666 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9667 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9668
9669 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9670 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9671 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9672 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9673 {
9674 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9675 rcStrict = VINF_SUCCESS;
9676 }
9677 return rcStrict;
9678}
9679
9680
9681/**
9682 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9683 */
9684HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9685{
9686 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9687
9688 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9689 | HMVMX_READ_EXIT_INSTR_INFO
9690 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9691 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9692 | CPUMCTX_EXTRN_SREG_MASK
9693 | CPUMCTX_EXTRN_HWVIRT
9694 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9695 AssertRCReturn(rc, rc);
9696
9697 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9698
9699 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9700 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9701
9702 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9703 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9704 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9705 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9706 {
9707 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9708 rcStrict = VINF_SUCCESS;
9709 }
9710 return rcStrict;
9711}
9712
9713
9714/**
9715 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9716 */
9717HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9718{
9719 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9720
9721 /*
9722 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
 9723 * thus might not need to import the shadow VMCS state, but it's safer just in case
 9724 * code elsewhere dares to look at unsynced VMCS fields.
9725 */
9726 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9727 | HMVMX_READ_EXIT_INSTR_INFO
9728 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9729 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9730 | CPUMCTX_EXTRN_SREG_MASK
9731 | CPUMCTX_EXTRN_HWVIRT
9732 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9733 AssertRCReturn(rc, rc);
9734
9735 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9736
9737 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9738 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9739 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9740
9741 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9742 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9743 {
9744 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9745
9746# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
9747 /* Try for exit optimization. This is on the following instruction
9748 because it would be a waste of time to have to reinterpret the
 9749 already decoded vmread instruction. */
9750 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
9751 if (pExitRec)
9752 {
9753 /* Frequent access or probing. */
9754 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9755 AssertRCReturn(rc, rc);
9756
9757 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9758 Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9759 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9760 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9761 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9762 }
9763# endif
9764 }
9765 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9766 {
9767 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9768 rcStrict = VINF_SUCCESS;
9769 }
9770 return rcStrict;
9771}
9772
9773
9774/**
9775 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9776 */
9777HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9778{
9779 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9780
9781 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
9782 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9783 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9784 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9785 AssertRCReturn(rc, rc);
9786
9787 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9788
9789 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9790 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9791 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9792 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9793 {
9794 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9795 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9796 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9797 }
9798 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9799 return rcStrict;
9800}
9801
9802
9803/**
9804 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9805 */
9806HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9807{
9808 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9809
9810 /*
9811 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, since our HM hook
9812 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and it
9813 * flags re-loading the entire shadow VMCS, we should save the entire shadow VMCS here.
9814 */
9815 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9816 | HMVMX_READ_EXIT_INSTR_INFO
9817 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9818 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9819 | CPUMCTX_EXTRN_SREG_MASK
9820 | CPUMCTX_EXTRN_HWVIRT
9821 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9822 AssertRCReturn(rc, rc);
9823
9824 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9825
9826 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9827 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9828 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9829
9830 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9831 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9832 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9833 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9834 {
9835 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9836 rcStrict = VINF_SUCCESS;
9837 }
9838 return rcStrict;
9839}
9840
9841
9842/**
9843 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9844 */
9845HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9846{
9847 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9848
9849 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9850 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
9851 | CPUMCTX_EXTRN_HWVIRT
9852 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9853 AssertRCReturn(rc, rc);
9854
9855 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9856
9857 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9858 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9859 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9860 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9861 {
9862 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9863 rcStrict = VINF_SUCCESS;
9864 }
9865 return rcStrict;
9866}
9867
9868
9869/**
9870 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9871 */
9872HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9873{
9874 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9875
9876 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9877 | HMVMX_READ_EXIT_INSTR_INFO
9878 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9879 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9880 | CPUMCTX_EXTRN_SREG_MASK
9881 | CPUMCTX_EXTRN_HWVIRT
9882 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9883 AssertRCReturn(rc, rc);
9884
9885 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9886
9887 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9888 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9889
9890 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9891 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9892 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9893 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9894 {
9895 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9896 rcStrict = VINF_SUCCESS;
9897 }
9898 return rcStrict;
9899}
9900
9901
9902/**
9903 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9904 */
9905HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9906{
9907 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9908
9909 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9910 | HMVMX_READ_EXIT_INSTR_INFO
9911 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9912 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9913 | CPUMCTX_EXTRN_SREG_MASK
9914 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9915 AssertRCReturn(rc, rc);
9916
9917 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9918
9919 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9920 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9921
9922 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9923 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9924 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9925 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9926 {
9927 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9928 rcStrict = VINF_SUCCESS;
9929 }
9930 return rcStrict;
9931}
9932
9933
9934# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9935/**
9936 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9937 */
9938HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9939{
9940 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9941
9942 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9943 | HMVMX_READ_EXIT_INSTR_INFO
9944 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9945 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9946 | CPUMCTX_EXTRN_SREG_MASK
9947 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9948 AssertRCReturn(rc, rc);
9949
9950 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9951
9952 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9953 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9954
9955 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9956 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9957 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9958 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9959 {
9960 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9961 rcStrict = VINF_SUCCESS;
9962 }
9963 return rcStrict;
9964}
9965# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9966#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9967/** @} */
9968
9969
9970#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9971/** @name Nested-guest VM-exit handlers.
9972 * @{
9973 */
9974/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9975/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9976/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9977
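/*
 * Note: a rough summary of the pattern used by the conditional nested-guest handlers below
 * (an editorial reading of the code, not an authoritative statement): when the nested
 * hypervisor has enabled the corresponding intercept in the VMCS controls it shadows
 * (checked via the CPUMIsGuestVmx*Set helpers), the VM-exit is reflected to it through
 * IEM (IEMExecVmxVmexit*); otherwise the exit was requested by us and is handled on
 * behalf of the outer guest by the regular vmxHCExit* handler.
 */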
9978/**
9979 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9980 * Conditional VM-exit.
9981 */
9982HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9983{
9984 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9985
9986 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
9987
9988 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9989 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9990 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9991
9992 switch (uExitIntType)
9993 {
9994# ifndef IN_NEM_DARWIN
9995 /*
9996 * Physical NMIs:
9997          *   We shouldn't direct host physical NMIs to the nested-guest. Dispatch them to the host.
9998 */
9999 case VMX_EXIT_INT_INFO_TYPE_NMI:
10000 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
10001# endif
10002
10003 /*
10004 * Hardware exceptions,
10005 * Software exceptions,
10006 * Privileged software exceptions:
10007 * Figure out if the exception must be delivered to the guest or the nested-guest.
10008 */
10009 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
10010 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
10011 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
10012 {
10013 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10014 | HMVMX_READ_EXIT_INSTR_LEN
10015 | HMVMX_READ_IDT_VECTORING_INFO
10016 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10017
10018 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10019 if (CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo), pVmxTransient->uExitIntErrorCode))
10020 {
10021 /* Exit qualification is required for debug and page-fault exceptions. */
10022 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10023
10024 /*
10025 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
10026 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
10027 * length. However, if delivery of a software interrupt, software exception or privileged
10028 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
10029 */
10030 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10031 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT(pVmxTransient->uExitIntInfo,
10032 pVmxTransient->uExitIntErrorCode,
10033 pVmxTransient->uIdtVectoringInfo,
10034 pVmxTransient->uIdtVectoringErrorCode);
10035#ifdef DEBUG_ramshankar
10036 vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10037 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n",
10038 pVmxTransient->uExitIntInfo, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
10039 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
10040 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n",
10041 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
10042#endif
10043 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
10044 }
10045
10046 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
10047 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10048 return vmxHCExitXcpt(pVCpu, pVmxTransient);
10049 }
10050
10051 /*
10052 * Software interrupts:
10053 * VM-exits cannot be caused by software interrupts.
10054 *
10055 * External interrupts:
10056 * This should only happen when "acknowledge external interrupts on VM-exit"
10057 * control is set. However, we never set this when executing a guest or
10058 * nested-guest. For nested-guests it is emulated while injecting interrupts into
10059 * the guest.
10060 */
10061 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10062 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
10063 default:
10064 {
10065 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
10066 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
10067 }
10068 }
10069}
10070
10071
10072/**
10073 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
10074 * Unconditional VM-exit.
10075 */
10076HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10077{
10078 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10079 return IEMExecVmxVmexitTripleFault(pVCpu);
10080}
10081
10082
10083/**
10084 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
10085 */
10086HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10087{
10088 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10089
10090 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
10091 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10092 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
10093}
10094
10095
10096/**
10097 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
10098 */
10099HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10100{
10101 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10102
10103 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
10104 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10105     return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
10106}
10107
10108
10109/**
10110 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
10111 * Unconditional VM-exit.
10112 */
10113HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10114{
10115 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10116
10117 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10118 | HMVMX_READ_EXIT_INSTR_LEN
10119 | HMVMX_READ_IDT_VECTORING_INFO
10120 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10121
10122 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10123 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10124 pVmxTransient->uIdtVectoringErrorCode);
10125 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
10126}
10127
10128
10129/**
10130 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10131 */
10132HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10133{
10134 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10135
10136 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
10137 {
10138 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10139 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10140 }
10141 return vmxHCExitHlt(pVCpu, pVmxTransient);
10142}
10143
10144
10145/**
10146 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10147 */
10148HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10149{
10150 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10151
10152 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10153 {
10154 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10155 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10156 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10157 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10158 }
10159 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
10160}
10161
10162
10163/**
10164 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10165 */
10166HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10167{
10168 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10169
10170 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
10171 {
10172 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10173 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10174 }
10175 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
10176}
10177
10178
10179/**
10180 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
10181 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
10182 */
10183HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10184{
10185 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10186
10187 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
10188 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
10189
10190 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10191
10192 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
10193 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10194 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10195
10196 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
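    /* The VMCS field encoding comes from a general-purpose register; outside long mode only
       the lower 32 bits of that register are meaningful, so mask off the upper half. */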
10197 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
10198 u64VmcsField &= UINT64_C(0xffffffff);
10199
10200 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
10201 {
10202 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10203 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10204 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10205 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10206 }
10207
10208 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
10209 return vmxHCExitVmread(pVCpu, pVmxTransient);
10210 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
10211}
10212
10213
10214/**
10215 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10216 */
10217HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10218{
10219 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10220
10221 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10222 {
10223 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10224 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10225 }
10226
10227 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
10228}
10229
10230
10231/**
10232 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
10233 * Conditional VM-exit.
10234 */
10235HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10236{
10237 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10238
10239 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10240 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10241
10242 VBOXSTRICTRC rcStrict;
10243 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
10244 switch (uAccessType)
10245 {
10246 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
10247 {
10248 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10249 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10250 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10251 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10252
10253 bool fIntercept;
10254 switch (iCrReg)
10255 {
10256 case 0:
10257 case 4:
10258 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
10259 break;
10260
10261 case 3:
10262 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
10263 break;
10264
10265 case 8:
10266 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
10267 break;
10268
10269 default:
10270 fIntercept = false;
10271 break;
10272 }
10273 if (fIntercept)
10274 {
10275 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10276 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10277 }
10278 else
10279 {
10280 int const rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10281 AssertRCReturn(rc, rc);
10282 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10283 }
10284 break;
10285 }
10286
10287 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
10288 {
10289 /*
10290              * CR0/CR4 reads do not cause VM-exits; the read-shadow is used (subject to masking).
10291 * CR2 reads do not cause a VM-exit.
10292 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
10293 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
10294 */
10295 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10296 if ( iCrReg == 3
10297 || iCrReg == 8)
10298 {
10299 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
10300 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
10301 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
10302 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
10303 {
10304 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10305 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10306 }
10307 else
10308 {
10309 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10310 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10311 }
10312 }
10313 else
10314 {
10315 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
10316 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
10317 }
10318 break;
10319 }
10320
10321 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
10322 {
10323 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
10324 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
10325 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
10326 if ( (uGstHostMask & X86_CR0_TS)
10327 && (uReadShadow & X86_CR0_TS))
10328 {
10329 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10330 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10331 }
10332 else
10333 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
10334 break;
10335 }
10336
10337 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10338 {
10339 RTGCPTR GCPtrEffDst;
10340 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
10341 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
10342 if (fMemOperand)
10343 {
10344 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10345 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
10346 }
10347 else
10348 GCPtrEffDst = NIL_RTGCPTR;
10349
10350 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
10351 {
10352 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10353 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
10354 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10355 }
10356 else
10357 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
10358 break;
10359 }
10360
10361 default:
10362 {
10363 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
10364 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
10365 }
10366 }
10367
10368 if (rcStrict == VINF_IEM_RAISED_XCPT)
10369 {
10370 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10371 rcStrict = VINF_SUCCESS;
10372 }
10373 return rcStrict;
10374}
10375
10376
10377/**
10378 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
10379 * Conditional VM-exit.
10380 */
10381HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10382{
10383 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10384
10385 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
10386 {
10387 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10388 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10389 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10390 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10391 }
10392 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
10393}
10394
10395
10396/**
10397 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
10398 * Conditional VM-exit.
10399 */
10400HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10401{
10402 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10403
10404 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10405
10406 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
10407 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
10408 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
10409
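    /* The exit qualification encodes the access size as 0 (1 byte), 1 (2 bytes) or 3 (4 bytes);
       2 is not a valid encoding, hence the assertion above and the zero placeholder below. */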
10410 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
10411 uint8_t const cbAccess = s_aIOSizes[uIOSize];
10412 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
10413 {
10414 /*
10415 * IN/OUT instruction:
10416 * - Provides VM-exit instruction length.
10417 *
10418 * INS/OUTS instruction:
10419 * - Provides VM-exit instruction length.
10420 * - Provides Guest-linear address.
10421 * - Optionally provides VM-exit instruction info (depends on CPU feature).
10422 */
10423 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10424 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10425
10426         /* Make sure we don't use stale/uninitialized VMX-transient info below. */
10427 pVmxTransient->ExitInstrInfo.u = 0;
10428 pVmxTransient->uGuestLinearAddr = 0;
10429
10430 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
10431 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
10432 if (fIOString)
10433 {
10434 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10435 if (fVmxInsOutsInfo)
10436 {
10437 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
10438 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10439 }
10440 }
10441
10442 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
10443 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10444 }
10445 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
10446}
10447
10448
10449/**
10450 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10451 */
10452HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10453{
10454 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10455
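    /* If the nested hypervisor uses MSR bitmaps, consult its bitmap for the MSR in ECX;
       otherwise every RDMSR unconditionally exits to the nested hypervisor. */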
10456 uint32_t fMsrpm;
10457 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10458 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10459 else
10460 fMsrpm = VMXMSRPM_EXIT_RD;
10461
10462 if (fMsrpm & VMXMSRPM_EXIT_RD)
10463 {
10464 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10465 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10466 }
10467 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
10468}
10469
10470
10471/**
10472 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10473 */
10474HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10475{
10476 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10477
10478 uint32_t fMsrpm;
10479 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10480 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10481 else
10482 fMsrpm = VMXMSRPM_EXIT_WR;
10483
10484 if (fMsrpm & VMXMSRPM_EXIT_WR)
10485 {
10486 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10487 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10488 }
10489 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
10490}
10491
10492
10493/**
10494 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10495 */
10496HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10497{
10498 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10499
10500 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
10501 {
10502 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10503 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10504 }
10505 return vmxHCExitMwait(pVCpu, pVmxTransient);
10506}
10507
10508
10509/**
10510 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
10511 * VM-exit.
10512 */
10513HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10514{
10515 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10516
10517 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
10518 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10519 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10520 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10521}
10522
10523
10524/**
10525 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10526 */
10527HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10528{
10529 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10530
10531 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
10532 {
10533 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10534 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10535 }
10536 return vmxHCExitMonitor(pVCpu, pVmxTransient);
10537}
10538
10539
10540/**
10541 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10542 */
10543HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10544{
10545 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10546
10547 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10548 * PAUSE when executing a nested-guest? If it does not, we would not need
10549 * to check for the intercepts here. Just call VM-exit... */
10550
10551 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10552 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10553 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10554 {
10555 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10556 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10557 }
10558 return vmxHCExitPause(pVCpu, pVmxTransient);
10559}
10560
10561
10562/**
10563 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10564 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10565 */
10566HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10567{
10568 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10569
10570 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10571 {
10572 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10573 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10574 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10575 }
10576 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10577}
10578
10579
10580/**
10581 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10582 * VM-exit.
10583 */
10584HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10585{
10586 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10587
10588 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10589 | HMVMX_READ_EXIT_INSTR_LEN
10590 | HMVMX_READ_IDT_VECTORING_INFO
10591 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10592
10593 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10594
10595 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10596 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10597
10598 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10599 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10600 pVmxTransient->uIdtVectoringErrorCode);
10601 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10602}
10603
10604
10605/**
10606 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10607 * Conditional VM-exit.
10608 */
10609HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10610{
10611 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10612
10613 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10614 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10615 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10616}
10617
10618
10619/**
10620 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10621 * Conditional VM-exit.
10622 */
10623HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10624{
10625 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10626
10627 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10628 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10629 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10630}
10631
10632
10633/**
10634 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10635 */
10636HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10637{
10638 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10639
10640 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10641 {
10642 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10643 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10644 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10645 }
10646 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10647}
10648
10649
10650/**
10651 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10652 */
10653HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10654{
10655 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10656
10657 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10658 {
10659 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10660 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10661 }
10662 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10663}
10664
10665
10666/**
10667 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10668 */
10669HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10670{
10671 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10672
10673 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10674 {
10675 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10676 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10677 | HMVMX_READ_EXIT_INSTR_INFO
10678 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10679 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10680 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10681 }
10682 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10683}
10684
10685
10686/**
10687  * Nested-guest VM-exit handler for invalid guest state
10688 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10689 */
10690HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10691{
10692 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10693
10694 /*
10695 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10696      * So if it does happen, it indicates a bug, possibly in the hardware-assisted VMX code.
10697      * Handle it as if the outer guest were in an invalid guest state.
10698 *
10699 * When the fast path is implemented, this should be changed to cause the corresponding
10700 * nested-guest VM-exit.
10701 */
10702 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10703}
10704
10705
10706/**
10707 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10708 * and only provide the instruction length.
10709 *
10710 * Unconditional VM-exit.
10711 */
10712HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10713{
10714 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10715
10716#ifdef VBOX_STRICT
10717 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10718 switch (pVmxTransient->uExitReason)
10719 {
10720 case VMX_EXIT_ENCLS:
10721 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10722 break;
10723
10724 case VMX_EXIT_VMFUNC:
10725 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10726 break;
10727 }
10728#endif
10729
10730 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10731 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10732}
10733
10734
10735/**
10736 * Nested-guest VM-exit handler for instructions that provide instruction length as
10737 * well as more information.
10738 *
10739 * Unconditional VM-exit.
10740 */
10741HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10742{
10743 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10744
10745# ifdef VBOX_STRICT
10746 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10747 switch (pVmxTransient->uExitReason)
10748 {
10749 case VMX_EXIT_GDTR_IDTR_ACCESS:
10750 case VMX_EXIT_LDTR_TR_ACCESS:
10751 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10752 break;
10753
10754 case VMX_EXIT_RDRAND:
10755 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10756 break;
10757
10758 case VMX_EXIT_RDSEED:
10759 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10760 break;
10761
10762 case VMX_EXIT_XSAVES:
10763 case VMX_EXIT_XRSTORS:
10764 /** @todo NSTVMX: Verify XSS-bitmap. */
10765 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10766 break;
10767
10768 case VMX_EXIT_UMWAIT:
10769 case VMX_EXIT_TPAUSE:
10770 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10771 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10772 break;
10773
10774 case VMX_EXIT_LOADIWKEY:
10775 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10776 break;
10777 }
10778# endif
10779
10780 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10781 | HMVMX_READ_EXIT_INSTR_LEN
10782 | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10783 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10784 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10785}
10786
10787# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10788
10789/**
10790 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10791 * Conditional VM-exit.
10792 */
10793HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10794{
10795 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10796 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10797
10798 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10799 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10800 {
10801 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10802 | HMVMX_READ_EXIT_INSTR_LEN
10803 | HMVMX_READ_EXIT_INTERRUPTION_INFO
10804 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10805 | HMVMX_READ_IDT_VECTORING_INFO
10806 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
10807 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10808 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10809 AssertRCReturn(rc, rc);
10810
10811 /*
10812          * If the VM-exit is ours to handle, we are responsible for re-injecting any event whose
10813          * delivery might have triggered it.  If we forward the VM-exit to the inner VMM, dealing
10814          * with the event becomes its problem and we clear the recovered event.
10815 */
10816 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10817 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10818 { /*likely*/ }
10819 else
10820 {
10821 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
10822 return rcStrict;
10823 }
10824         uint32_t const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* Paranoia; nothing below should inject events. */
10825
10826 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10827 uint64_t const uExitQual = pVmxTransient->uExitQual;
10828
10829 RTGCPTR GCPtrNestedFault;
10830 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10831 if (fIsLinearAddrValid)
10832 {
10833 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10834 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
10835 }
10836 else
10837 GCPtrNestedFault = 0;
10838
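        /* Synthesize a #PF-style error code from the EPT violation exit qualification:
           instruction fetches map to ID, write accesses to RW, and any read/write/execute
           permission bit present in the EPT entry to P.  PGM gets first crack at the nested
           fault below; only if it cannot resolve it do we reflect an EPT violation or EPT
           misconfiguration VM-exit to the nested hypervisor. */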
10839 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10840 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10841 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10842 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10843 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10844
10845 PGMPTWALK Walk;
10846 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10847 rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, pCtx, GCPhysNestedFault,
10848 fIsLinearAddrValid, GCPtrNestedFault, &Walk);
10849 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
10850 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
10851 if (RT_SUCCESS(rcStrict))
10852 return rcStrict;
10853
10854 if (fClearEventOnForward)
10855 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
10856
10857 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10858 pVmxTransient->uIdtVectoringErrorCode);
10859 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10860 {
10861 VMXVEXITINFO const ExitInfo
10862 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
10863 pVmxTransient->uExitQual,
10864 pVmxTransient->cbExitInstr,
10865 pVmxTransient->uGuestLinearAddr,
10866 pVmxTransient->uGuestPhysicalAddr);
10867 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10868 }
10869
10870 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10871 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10872 }
10873
10874 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10875}
10876
10877
10878/**
10879 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10880 * Conditional VM-exit.
10881 */
10882HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10883{
10884 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10885 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10886
10887 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10888 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10889 {
10890 vmxHCReadToTransient<HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10891 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10892 AssertRCReturn(rc, rc);
10893
10894 PGMPTWALK Walk;
10895 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10896 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10897 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, pCtx,
10898 GCPhysNestedFault, false /* fIsLinearAddrValid */,
10899 0 /* GCPtrNestedFault */, &Walk);
10900 if (RT_SUCCESS(rcStrict))
10901 {
10902 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
10903 return rcStrict;
10904 }
10905
10906 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
10907 vmxHCReadToTransient< HMVMX_READ_IDT_VECTORING_INFO
10908 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10909
10910 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10911 pVmxTransient->uIdtVectoringErrorCode);
10912 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10913 }
10914
10915 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
10916}
10917
10918# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10919
10920/** @} */
10921#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10922
10923
10924/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10925 * probes.
10926 *
10927  * The following few functions and associated structure contain the bloat
10928 * necessary for providing detailed debug events and dtrace probes as well as
10929 * reliable host side single stepping. This works on the principle of
10930 * "subclassing" the normal execution loop and workers. We replace the loop
10931 * method completely and override selected helpers to add necessary adjustments
10932 * to their core operation.
10933 *
10934 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10935 * any performance for debug and analysis features.
10936 *
10937 * @{
10938 */
10939
10940/**
10941  * Transient per-VCPU debug state of the VMCS and related info that we save/restore
10942  * in the debug run loop.
10943 */
10944typedef struct VMXRUNDBGSTATE
10945{
10946 /** The RIP we started executing at. This is for detecting that we stepped. */
10947 uint64_t uRipStart;
10948 /** The CS we started executing with. */
10949 uint16_t uCsStart;
10950
10951 /** Whether we've actually modified the 1st execution control field. */
10952 bool fModifiedProcCtls : 1;
10953 /** Whether we've actually modified the 2nd execution control field. */
10954 bool fModifiedProcCtls2 : 1;
10955 /** Whether we've actually modified the exception bitmap. */
10956 bool fModifiedXcptBitmap : 1;
10957
10958     /** We desire the CR0 guest/host mask to be cleared. */
10959 bool fClearCr0Mask : 1;
10960     /** We desire the CR4 guest/host mask to be cleared. */
10961 bool fClearCr4Mask : 1;
10962 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10963 uint32_t fCpe1Extra;
10964 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10965 uint32_t fCpe1Unwanted;
10966 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10967 uint32_t fCpe2Extra;
10968 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10969 uint32_t bmXcptExtra;
10970 /** The sequence number of the Dtrace provider settings the state was
10971 * configured against. */
10972 uint32_t uDtraceSettingsSeqNo;
10973 /** VM-exits to check (one bit per VM-exit). */
10974 uint32_t bmExitsToCheck[3];
10975
10976 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10977 uint32_t fProcCtlsInitial;
10978 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10979 uint32_t fProcCtls2Initial;
10980 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10981 uint32_t bmXcptInitial;
10982} VMXRUNDBGSTATE;
10983AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
10984typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
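
/*
 * Rough usage sketch of the debug-state helpers below (illustration only; the actual debug
 * run loop further down in this file is the authoritative reference):
 *
 *     VMXRUNDBGSTATE DbgState;
 *     vmxHCRunDebugStateInit(pVCpu, pVmxTransient, &DbgState);
 *     vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState);
 *     for (;;)
 *     {
 *         vmxHCPreRunGuestDebugStateApply(pVCpu, pVmxTransient, &DbgState);  (right before VM-entry)
 *         (execute guest code, handle the VM-exit, and re-run the update step when the
 *          DTrace settings sequence number changes)
 *     }
 *     rcStrict = vmxHCRunDebugStateRevert(pVCpu, pVmxTransient, &DbgState, rcStrict);
 */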
10985
10986
10987/**
10988 * Initializes the VMXRUNDBGSTATE structure.
10989 *
10990 * @param pVCpu The cross context virtual CPU structure of the
10991 * calling EMT.
10992 * @param pVmxTransient The VMX-transient structure.
10993 * @param pDbgState The debug state to initialize.
10994 */
10995static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10996{
10997 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
10998 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
10999
11000 pDbgState->fModifiedProcCtls = false;
11001 pDbgState->fModifiedProcCtls2 = false;
11002 pDbgState->fModifiedXcptBitmap = false;
11003 pDbgState->fClearCr0Mask = false;
11004 pDbgState->fClearCr4Mask = false;
11005 pDbgState->fCpe1Extra = 0;
11006 pDbgState->fCpe1Unwanted = 0;
11007 pDbgState->fCpe2Extra = 0;
11008 pDbgState->bmXcptExtra = 0;
11009 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
11010 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
11011 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
11012}
11013
11014
11015/**
11016  * Updates the VMCS fields with changes requested by @a pDbgState.
11017 *
11018  * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
11019 * immediately before executing guest code, i.e. when interrupts are disabled.
11020 * We don't check status codes here as we cannot easily assert or return in the
11021 * latter case.
11022 *
11023 * @param pVCpu The cross context virtual CPU structure.
11024 * @param pVmxTransient The VMX-transient structure.
11025 * @param pDbgState The debug state.
11026 */
11027static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11028{
11029 /*
11030 * Ensure desired flags in VMCS control fields are set.
11031 * (Ignoring write failure here, as we're committed and it's just debug extras.)
11032 *
11033 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
11034 * there should be no stale data in pCtx at this point.
11035 */
11036 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11037 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
11038 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
11039 {
11040 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
11041 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
11042 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
11043 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
11044 pDbgState->fModifiedProcCtls = true;
11045 }
11046
11047 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
11048 {
11049 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
11050 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
11051 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
11052 pDbgState->fModifiedProcCtls2 = true;
11053 }
11054
11055 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
11056 {
11057 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
11058 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
11059 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
11060 pDbgState->fModifiedXcptBitmap = true;
11061 }
11062
11063 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
11064 {
11065 pVmcsInfo->u64Cr0Mask = 0;
11066 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
11067 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
11068 }
11069
11070 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
11071 {
11072 pVmcsInfo->u64Cr4Mask = 0;
11073 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
11074 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
11075 }
11076
11077 NOREF(pVCpu);
11078}
11079
11080
11081/**
11082  * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
11083 * re-entry next time around.
11084 *
11085 * @returns Strict VBox status code (i.e. informational status codes too).
11086 * @param pVCpu The cross context virtual CPU structure.
11087 * @param pVmxTransient The VMX-transient structure.
11088 * @param pDbgState The debug state.
11089 * @param rcStrict The return code from executing the guest using single
11090 * stepping.
11091 */
11092static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
11093 VBOXSTRICTRC rcStrict)
11094{
11095 /*
11096 * Restore VM-exit control settings as we may not reenter this function the
11097 * next time around.
11098 */
11099 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11100
11101     /* We reload the initial value and trigger whatever recalculations we can the next
11102        time around.  From the looks of things, that's all that's required at the moment. */
11103 if (pDbgState->fModifiedProcCtls)
11104 {
11105 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
11106 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
11107 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
11108 AssertRC(rc2);
11109 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
11110 }
11111
11112 /* We're currently the only ones messing with this one, so just restore the
11113 cached value and reload the field. */
11114 if ( pDbgState->fModifiedProcCtls2
11115 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
11116 {
11117 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
11118 AssertRC(rc2);
11119 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
11120 }
11121
11122 /* If we've modified the exception bitmap, we restore it and trigger
11123 reloading and partial recalculation the next time around. */
11124 if (pDbgState->fModifiedXcptBitmap)
11125 {
11126 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pDbgState->bmXcptInitial);
11127 AssertRC(rc2);
11128 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
11129 }
11130
11131 return rcStrict;
11132}
11133
11134
11135/**
11136 * Configures VM-exit controls for current DBGF and DTrace settings.
11137 *
11138 * This updates @a pDbgState and the VMCS execution control fields to reflect
11139 * the necessary VM-exits demanded by DBGF and DTrace.
11140 *
11141 * @param pVCpu The cross context virtual CPU structure.
11142 * @param pVmxTransient The VMX-transient structure. May update
11143 * fUpdatedTscOffsettingAndPreemptTimer.
11144 * @param pDbgState The debug state.
11145 */
11146static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11147{
11148#ifndef IN_NEM_DARWIN
11149 /*
11150 * Take down the dtrace serial number so we can spot changes.
11151 */
11152 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
11153 ASMCompilerBarrier();
11154#endif
11155
11156 /*
11157 * We'll rebuild most of the middle block of data members (holding the
11158 * current settings) as we go along here, so start by clearing it all.
11159 */
11160 pDbgState->bmXcptExtra = 0;
11161 pDbgState->fCpe1Extra = 0;
11162 pDbgState->fCpe1Unwanted = 0;
11163 pDbgState->fCpe2Extra = 0;
11164 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
11165 pDbgState->bmExitsToCheck[i] = 0;
11166
11167 /*
11168 * Software interrupts (INT XXh) - no idea how to trigger these...
11169 */
11170 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11171 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
11172 || VBOXVMM_INT_SOFTWARE_ENABLED())
11173 {
11174 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11175 }
11176
11177 /*
11178 * INT3 breakpoints - triggered by #BP exceptions.
11179 */
11180 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
11181 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11182
11183 /*
11184 * Exception bitmap and XCPT events+probes.
11185 */
11186 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
11187 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
11188 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
11189
11190 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
11191 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
11192 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11193 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
11194 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
11195 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
11196 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
11197 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
11198 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
11199 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
11200 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
11201 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
11202 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
11203 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
11204 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
11205 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
11206 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
11207 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
11208
11209 if (pDbgState->bmXcptExtra)
11210 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11211
11212 /*
11213 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
11214 *
11215 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
11216 * So, when adding/changing/removing please don't forget to update it.
11217 *
11218  * Some of the macros are picking up local variables to save horizontal space
11219  * (being able to see it in a table is the lesser evil here).
11220 */
11221#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
11222 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
11223 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
11224#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
11225 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11226 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11227 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11228 } else do { } while (0)
11229#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
11230 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11231 { \
11232 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
11233 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11234 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11235 } else do { } while (0)
11236#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
11237 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11238 { \
11239 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
11240 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11241 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11242 } else do { } while (0)
11243#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
11244 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11245 { \
11246 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
11247 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11248 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11249 } else do { } while (0)
11250
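    /* Illustration: given the macros above, a line like
           SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT);
       expands to roughly (AssertCompile and the trailing else elided)
           if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INSTR_HALT)
               || VBOXVMM_INSTR_HALT_ENABLED())
           {
               pDbgState->fCpe1Extra |= VMX_PROC_CTLS_HLT_EXIT;
               ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_HLT);
           }
       i.e. whenever either the DBGF event or the dtrace probe is armed, the
       corresponding exiting control is forced on and the exit is marked for
       inspection below. */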
11251 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
11252 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
11253 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
11254 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
11255 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
11256
11257 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
11258 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
11259 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
11260 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
11261 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
11262 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
11263 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
11264 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
11265 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
11266 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
11267 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
11268 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
11269 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
11270 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
11271 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
11272 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
11273 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
11274 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
11275 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
11276 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
11277 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
11278 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
11279 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
11280 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
11281 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
11282 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
11283 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
11284 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
11285 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
11286 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
11287 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
11288 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
11289 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
11290 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
11291 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
11292 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
11293
11294 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
11295 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11296 {
11297 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo,
11298 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
11299 AssertRC(rc);
11300
11301#if 0 /** @todo fix me */
11302 pDbgState->fClearCr0Mask = true;
11303 pDbgState->fClearCr4Mask = true;
11304#endif
11305 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
11306 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
11307 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11308 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
11309 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
11310 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
11311 require clearing here and in the loop if we start using it. */
11312 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
11313 }
11314 else
11315 {
11316 if (pDbgState->fClearCr0Mask)
11317 {
11318 pDbgState->fClearCr0Mask = false;
11319 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
11320 }
11321 if (pDbgState->fClearCr4Mask)
11322 {
11323 pDbgState->fClearCr4Mask = false;
11324 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
11325 }
11326 }
11327 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
11328 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
11329
11330 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
11331 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
11332 {
11333 /** @todo later, need to fix handler as it assumes this won't usually happen. */
11334 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
11335 }
11336 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
11337 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
11338
11339 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
11340 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
11341 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
11342 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
11343 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
11344 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
11345 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
11346 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
11347#if 0 /** @todo too slow, fix handler. */
11348 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
11349#endif
11350 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
11351
11352 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
11353 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
11354 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
11355 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
11356 {
11357 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11358 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
11359 }
11360 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11361 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11362 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11363 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11364
11365 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
11366 || IS_EITHER_ENABLED(pVM, INSTR_STR)
11367 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
11368 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
11369 {
11370 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11371 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
11372 }
11373 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
11374 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
11375 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
11376 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
11377
11378 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
11379 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
11380 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
11381 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
11382 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
11383 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
11384 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
11385 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
11386 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
11387 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
11388 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
11389 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
11390 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
11391 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
11392 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
11393 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
11394 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
11395 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
11396 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
11397     SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
11398 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
11399 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
11400
11401#undef IS_EITHER_ENABLED
11402#undef SET_ONLY_XBM_IF_EITHER_EN
11403#undef SET_CPE1_XBM_IF_EITHER_EN
11404#undef SET_CPEU_XBM_IF_EITHER_EN
11405#undef SET_CPE2_XBM_IF_EITHER_EN
11406
11407 /*
11408 * Sanitize the control stuff.
11409 */
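    /* In short: keep only the extra control bits the CPU actually supports
       (the allowed1 masks), force USE_SECONDARY_CTLS on whenever any secondary
       control survived, and drop "unwanted" bits which, going by the allowed0
       mask, the hardware will not let us clear anyway. */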
11410 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
11411 if (pDbgState->fCpe2Extra)
11412 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
11413 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
11414 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
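    /* If the desired RDTSC-exiting setting changed, flip the bookkeeping flag
       and clear fUpdatedTscOffsettingAndPreemptTimer so the TSC offsetting /
       preemption-timer setup gets re-evaluated with the new setting. */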
11415#ifndef IN_NEM_DARWIN
11416 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11417 {
11418 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
11419 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11420 }
11421#else
11422 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11423 {
11424 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
11425 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11426 }
11427#endif
11428
11429 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
11430 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
11431 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
11432 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
11433}
11434
11435
11436/**
11437 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
11438 * appropriate.
11439 *
11440 * The caller has already checked the VM-exit against the
11441 * VMXRUNDBGSTATE::bmExitsToCheck bitmap and has also checked for NMIs,
11442 * so we don't have to do either here.
11443 *
11444 * @returns Strict VBox status code (i.e. informational status codes too).
11445 * @param pVCpu The cross context virtual CPU structure.
11446 * @param pVmxTransient The VMX-transient structure.
11447 * @param uExitReason The VM-exit reason.
11448 *
11449 * @remarks The name of this function is displayed by dtrace, so keep it short
11450 *          and to the point. No longer than 33 chars, please.
11451 */
11452static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11453{
11454 /*
11455 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11456 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11457 *
11458     * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
11459     *       does. Additions/changes/removals must be made in both places. Same ordering, please.
11460 *
11461 * Added/removed events must also be reflected in the next section
11462 * where we dispatch dtrace events.
11463 */
11464 bool fDtrace1 = false;
11465 bool fDtrace2 = false;
11466 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11467 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11468 uint32_t uEventArg = 0;
11469#define SET_EXIT(a_EventSubName) \
11470 do { \
11471 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11472 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11473 } while (0)
11474#define SET_BOTH(a_EventSubName) \
11475 do { \
11476 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11477 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11478 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11479 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11480 } while (0)
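    /* Illustration: SET_BOTH(CPUID) expands to (do/while wrapper elided)
           enmEvent1 = DBGFEVENT_INSTR_CPUID;
           enmEvent2 = DBGFEVENT_EXIT_CPUID;
           fDtrace1  = VBOXVMM_INSTR_CPUID_ENABLED();
           fDtrace2  = VBOXVMM_EXIT_CPUID_ENABLED();
       so each instruction-style exit yields an INSTR and an EXIT event/probe
       pair, while SET_EXIT() only fills in the EXIT half. */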
11481 switch (uExitReason)
11482 {
11483 case VMX_EXIT_MTF:
11484 return vmxHCExitMtf(pVCpu, pVmxTransient);
11485
11486 case VMX_EXIT_XCPT_OR_NMI:
11487 {
11488 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11489 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11490 {
11491 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11492 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11493 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11494 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11495 {
11496 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11497 {
11498 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE>(pVCpu, pVmxTransient);
11499 uEventArg = pVmxTransient->uExitIntErrorCode;
11500 }
11501 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11502 switch (enmEvent1)
11503 {
11504 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11505 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11506 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11507 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11508 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11509 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11510 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11511 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11512 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11513 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11514 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11515 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11516 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11517 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11518 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11519 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11520 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11521 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11522 default: break;
11523 }
11524 }
11525 else
11526 AssertFailed();
11527 break;
11528
11529 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11530 uEventArg = idxVector;
11531 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11532 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11533 break;
11534 }
11535 break;
11536 }
11537
11538 case VMX_EXIT_TRIPLE_FAULT:
11539 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11540 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11541 break;
11542 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11543 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11544 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11545 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11546 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11547
11548 /* Instruction specific VM-exits: */
11549 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11550 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11551 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11552 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11553 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11554 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11555 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11556 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11557 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11558 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11559 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11560 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11561 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11562 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11563 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11564 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11565 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11566 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11567 case VMX_EXIT_MOV_CRX:
11568 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11569 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11570 SET_BOTH(CRX_READ);
11571 else
11572 SET_BOTH(CRX_WRITE);
11573 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11574 break;
11575 case VMX_EXIT_MOV_DRX:
11576 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11577 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11578 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11579 SET_BOTH(DRX_READ);
11580 else
11581 SET_BOTH(DRX_WRITE);
11582 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11583 break;
11584 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11585 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11586 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11587 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11588 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11589 case VMX_EXIT_GDTR_IDTR_ACCESS:
11590 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11591 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11592 {
11593 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11594 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11595 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11596 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11597 }
11598 break;
11599
11600 case VMX_EXIT_LDTR_TR_ACCESS:
11601 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11602 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11603 {
11604 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11605 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11606 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11607 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11608 }
11609 break;
11610
11611 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11612 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11613 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11614 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11615 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11616 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11617 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11618 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11619 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11620 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11621 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11622
11623 /* Events that aren't relevant at this point. */
11624 case VMX_EXIT_EXT_INT:
11625 case VMX_EXIT_INT_WINDOW:
11626 case VMX_EXIT_NMI_WINDOW:
11627 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11628 case VMX_EXIT_PREEMPT_TIMER:
11629 case VMX_EXIT_IO_INSTR:
11630 break;
11631
11632 /* Errors and unexpected events. */
11633 case VMX_EXIT_INIT_SIGNAL:
11634 case VMX_EXIT_SIPI:
11635 case VMX_EXIT_IO_SMI:
11636 case VMX_EXIT_SMI:
11637 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11638 case VMX_EXIT_ERR_MSR_LOAD:
11639 case VMX_EXIT_ERR_MACHINE_CHECK:
11640 case VMX_EXIT_PML_FULL:
11641 case VMX_EXIT_VIRTUALIZED_EOI:
11642 break;
11643
11644 default:
11645 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11646 break;
11647 }
11648#undef SET_BOTH
11649#undef SET_EXIT
11650
11651 /*
11652     * Dtrace tracepoints go first. We do them all here at once so we don't
11653     * have to repeat the guest-state import and related code a few dozen times.
11654     * The downside is that we've got to repeat the switch, though this time
11655     * we use enmEvent since the probes are a subset of what DBGF does.
11656 */
11657 if (fDtrace1 || fDtrace2)
11658 {
11659 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11660 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11661 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11662 switch (enmEvent1)
11663 {
11664 /** @todo consider which extra parameters would be helpful for each probe. */
11665 case DBGFEVENT_END: break;
11666 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11667 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11668 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11669 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11670 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11671 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11672 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11673 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11674 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11675 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11676 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11677 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11678 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11679 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11680 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11681 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11682 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11683 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11684 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11685 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11686 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11687 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11688 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11689 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11690 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11691 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11692 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11693 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11694 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11695 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11696 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11697 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11698 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11699 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11700 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11701 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11702 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11703 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11704 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11705 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11706 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11707 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11708 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11709 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11710 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11711 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11712 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11713 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11714 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11715 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11716 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11717 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11718 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11719 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11720 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11721 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11722 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11723 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11724 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11725 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11726 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11727 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11728 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11729 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11730 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11731 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11732 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11733 }
11734 switch (enmEvent2)
11735 {
11736 /** @todo consider which extra parameters would be helpful for each probe. */
11737 case DBGFEVENT_END: break;
11738 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11739 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11740 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11741 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11742 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11743 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11744 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11745 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11746 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11747 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11748 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11749 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11750 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11751 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11752 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11753 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11754 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11755 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11756 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11757 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11758 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11759 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11760 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11761 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11762 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11763 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11764 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11765 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11766 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11767 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11768 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11769 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11770 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11771 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11772 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11773 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11774 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11775 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11776 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11777 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11778 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11779 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11780 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11781 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11782 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11783 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11784 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11785 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11786 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11787 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11788 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11789 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11790 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11791 }
11792 }
11793
11794 /*
11795     * Fire off the DBGF event, if enabled (our check here is just a quick one,
11796 * the DBGF call will do a full check).
11797 *
11798 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11799     * Note! If we have two events, we prioritize the first, i.e. the instruction
11800 * one, in order to avoid event nesting.
11801 */
11802 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11803 if ( enmEvent1 != DBGFEVENT_END
11804 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11805 {
11806 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11807 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11808 if (rcStrict != VINF_SUCCESS)
11809 return rcStrict;
11810 }
11811 else if ( enmEvent2 != DBGFEVENT_END
11812 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11813 {
11814 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11815 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11816 if (rcStrict != VINF_SUCCESS)
11817 return rcStrict;
11818 }
11819
11820 return VINF_SUCCESS;
11821}
11822
11823
11824/**
11825 * Single-stepping VM-exit filtering.
11826 *
11827 * This preprocesses the VM-exits and decides whether we've gotten far
11828 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11829 * handling is performed.
11830 *
11831 * @returns Strict VBox status code (i.e. informational status codes too).
11832 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11833 * @param pVmxTransient The VMX-transient structure.
11834 * @param pDbgState The debug state.
11835 */
11836DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11837{
11838 /*
11839 * Expensive (saves context) generic dtrace VM-exit probe.
11840 */
11841 uint32_t const uExitReason = pVmxTransient->uExitReason;
11842 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11843 { /* more likely */ }
11844 else
11845 {
11846 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11847 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11848 AssertRC(rc);
11849 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11850 }
11851
11852#ifndef IN_NEM_DARWIN
11853 /*
11854 * Check for host NMI, just to get that out of the way.
11855 */
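    /* NMIs taken while the guest runs surface as VMX_EXIT_XCPT_OR_NMI with an
       NMI interruption type; those belong to the host, so they are handed to
       hmR0VmxExitHostNmi rather than being treated as guest exceptions. */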
11856 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11857 { /* normally likely */ }
11858 else
11859 {
11860 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
11861 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11862 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11863 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11864 }
11865#endif
11866
11867 /*
11868 * Check for single stepping event if we're stepping.
11869 */
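    /* For the exits listed below, the step is considered complete as soon as
       RIP or CS no longer matches what was recorded in the debug state when
       stepping started (uRipStart/uCsStart); MTF exits go to the MTF handler. */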
11870 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11871 {
11872 switch (uExitReason)
11873 {
11874 case VMX_EXIT_MTF:
11875 return vmxHCExitMtf(pVCpu, pVmxTransient);
11876
11877 /* Various events: */
11878 case VMX_EXIT_XCPT_OR_NMI:
11879 case VMX_EXIT_EXT_INT:
11880 case VMX_EXIT_TRIPLE_FAULT:
11881 case VMX_EXIT_INT_WINDOW:
11882 case VMX_EXIT_NMI_WINDOW:
11883 case VMX_EXIT_TASK_SWITCH:
11884 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11885 case VMX_EXIT_APIC_ACCESS:
11886 case VMX_EXIT_EPT_VIOLATION:
11887 case VMX_EXIT_EPT_MISCONFIG:
11888 case VMX_EXIT_PREEMPT_TIMER:
11889
11890 /* Instruction specific VM-exits: */
11891 case VMX_EXIT_CPUID:
11892 case VMX_EXIT_GETSEC:
11893 case VMX_EXIT_HLT:
11894 case VMX_EXIT_INVD:
11895 case VMX_EXIT_INVLPG:
11896 case VMX_EXIT_RDPMC:
11897 case VMX_EXIT_RDTSC:
11898 case VMX_EXIT_RSM:
11899 case VMX_EXIT_VMCALL:
11900 case VMX_EXIT_VMCLEAR:
11901 case VMX_EXIT_VMLAUNCH:
11902 case VMX_EXIT_VMPTRLD:
11903 case VMX_EXIT_VMPTRST:
11904 case VMX_EXIT_VMREAD:
11905 case VMX_EXIT_VMRESUME:
11906 case VMX_EXIT_VMWRITE:
11907 case VMX_EXIT_VMXOFF:
11908 case VMX_EXIT_VMXON:
11909 case VMX_EXIT_MOV_CRX:
11910 case VMX_EXIT_MOV_DRX:
11911 case VMX_EXIT_IO_INSTR:
11912 case VMX_EXIT_RDMSR:
11913 case VMX_EXIT_WRMSR:
11914 case VMX_EXIT_MWAIT:
11915 case VMX_EXIT_MONITOR:
11916 case VMX_EXIT_PAUSE:
11917 case VMX_EXIT_GDTR_IDTR_ACCESS:
11918 case VMX_EXIT_LDTR_TR_ACCESS:
11919 case VMX_EXIT_INVEPT:
11920 case VMX_EXIT_RDTSCP:
11921 case VMX_EXIT_INVVPID:
11922 case VMX_EXIT_WBINVD:
11923 case VMX_EXIT_XSETBV:
11924 case VMX_EXIT_RDRAND:
11925 case VMX_EXIT_INVPCID:
11926 case VMX_EXIT_VMFUNC:
11927 case VMX_EXIT_RDSEED:
11928 case VMX_EXIT_XSAVES:
11929 case VMX_EXIT_XRSTORS:
11930 {
11931 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11932 AssertRCReturn(rc, rc);
11933 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11934 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11935 return VINF_EM_DBG_STEPPED;
11936 break;
11937 }
11938
11939 /* Errors and unexpected events: */
11940 case VMX_EXIT_INIT_SIGNAL:
11941 case VMX_EXIT_SIPI:
11942 case VMX_EXIT_IO_SMI:
11943 case VMX_EXIT_SMI:
11944 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11945 case VMX_EXIT_ERR_MSR_LOAD:
11946 case VMX_EXIT_ERR_MACHINE_CHECK:
11947 case VMX_EXIT_PML_FULL:
11948 case VMX_EXIT_VIRTUALIZED_EOI:
11949             case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault-like, so I guess we must process it? */
11950 break;
11951
11952 default:
11953 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11954 break;
11955 }
11956 }
11957
11958 /*
11959 * Check for debugger event breakpoints and dtrace probes.
11960 */
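    /* Only exits flagged in bmExitsToCheck by the debug-state setup take the
       (expensive) vmxHCHandleExitDtraceEvents path. */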
11961 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11962 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11963 {
11964 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11965 if (rcStrict != VINF_SUCCESS)
11966 return rcStrict;
11967 }
11968
11969 /*
11970 * Normal processing.
11971 */
11972#ifdef HMVMX_USE_FUNCTION_TABLE
11973 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11974#else
11975 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11976#endif
11977}
11978
11979/** @} */