VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@97529

Last change on this file since 97529 was 97335, checked in by vboxsync, 2 years ago

VMM/HMVMXR0: Address issues in vmxHCAdvanceGuestRipBy wrt CS segment wraparound and RF.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 523.4 KB
1/* $Id: VMXAllTemplate.cpp.h 97335 2022-10-28 14:18:01Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW macros is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW macros is missing"
39#endif
40
41/** Enables condensing of VMREAD instructions, see vmxHCReadToTransient(). */
42#define HMVMX_WITH_CONDENSED_VMREADS
43
44/** Use the function table. */
45#define HMVMX_USE_FUNCTION_TABLE
46
47/** Determine which tagged-TLB flush handler to use. */
48#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
49#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
50#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
51#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
52
53/** Assert that all the given fields have been read from the VMCS. */
54#ifdef VBOX_STRICT
55# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
56 do { \
57 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
58 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
59 } while (0)
60#else
61# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
62#endif
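/** Illustrative usage sketch: a VM-exit handler asserts that the fields it relies on
 *  were read into the transient structure earlier (the flag choice here is just an
 *  example):
 * @code
 *     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
 * @endcode
 */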
63
64/**
65 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
66 * guest using hardware-assisted VMX.
67 *
68 * This excludes state like GPRs (other than RSP) which are always
69 * swapped and restored across the world-switch, and also registers like the
70 * EFER MSR which cannot be modified by the guest without causing a VM-exit.
71 */
72#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
73 | CPUMCTX_EXTRN_RFLAGS \
74 | CPUMCTX_EXTRN_RSP \
75 | CPUMCTX_EXTRN_SREG_MASK \
76 | CPUMCTX_EXTRN_TABLE_MASK \
77 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
78 | CPUMCTX_EXTRN_SYSCALL_MSRS \
79 | CPUMCTX_EXTRN_SYSENTER_MSRS \
80 | CPUMCTX_EXTRN_TSC_AUX \
81 | CPUMCTX_EXTRN_OTHER_MSRS \
82 | CPUMCTX_EXTRN_CR0 \
83 | CPUMCTX_EXTRN_CR3 \
84 | CPUMCTX_EXTRN_CR4 \
85 | CPUMCTX_EXTRN_DR7 \
86 | CPUMCTX_EXTRN_HWVIRT \
87 | CPUMCTX_EXTRN_INHIBIT_INT \
88 | CPUMCTX_EXTRN_INHIBIT_NMI)
89
90/**
91 * Exception bitmap mask for real-mode guests (real-on-v86).
92 *
93 * We need to intercept all exceptions manually except:
94 * - \#AC and \#DB, which are always intercepted anyway to prevent the CPU
95 * from deadlocking due to bugs in Intel CPUs.
96 * - \#PF, which need not be intercepted even in real-mode if we have nested
97 * paging support.
98 */
99#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
100 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
101 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
102 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
103 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
104 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
105 | RT_BIT(X86_XCPT_XF))
106
107/** Maximum VM-instruction error number. */
108#define HMVMX_INSTR_ERROR_MAX 28
109
110/** Profiling macro. */
111#ifdef HM_PROFILE_EXIT_DISPATCH
112# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
113# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
114#else
115# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
116# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
117#endif
118
119#ifndef IN_NEM_DARWIN
120/** Assert that preemption is disabled or covered by thread-context hooks. */
121# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
122 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
123
124/** Assert that we haven't migrated CPUs when thread-context hooks are not
125 * used. */
126# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
127 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
128 ("Illegal migration! Entered on CPU %u Current %u\n", \
129 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
130#else
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
132# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
133#endif
134
135/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
136 * context. */
137#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
138 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
139 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
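/** Illustrative usage sketch: asserting that the entire HM-managed guest state has
 *  been imported, as vmxHCSwitchToGstOrNstGstVmcs() does further down in this
 *  template before switching VMCSes:
 * @code
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
 * @endcode
 */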
140
141/** Log the VM-exit reason with an easily visible marker to identify it in a
142 * potential sea of logging data. */
143#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
144 do { \
145 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
146 HMGetVmxExitName(a_uExitReason))); \
147 } while (0) \
148
149
150/*********************************************************************************************************************************
151* Structures and Typedefs *
152*********************************************************************************************************************************/
153/**
154 * Memory operand read or write access.
155 */
156typedef enum VMXMEMACCESS
157{
158 VMXMEMACCESS_READ = 0,
159 VMXMEMACCESS_WRITE = 1
160} VMXMEMACCESS;
161
162
163/**
164 * VMX VM-exit handler.
165 *
166 * @returns Strict VBox status code (i.e. informational status codes too).
167 * @param pVCpu The cross context virtual CPU structure.
168 * @param pVmxTransient The VMX-transient structure.
169 */
170#ifndef HMVMX_USE_FUNCTION_TABLE
171typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
172#else
173typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
174/** Pointer to VM-exit handler. */
175typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
176#endif
177
178/**
179 * VMX VM-exit handler, non-strict status code.
180 *
181 * This is generally the same as FNVMXEXITHANDLER, the NSRC bit is just FYI.
182 *
183 * @returns VBox status code, no informational status code returned.
184 * @param pVCpu The cross context virtual CPU structure.
185 * @param pVmxTransient The VMX-transient structure.
186 *
187 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
188 * use of that status code will be replaced with VINF_EM_SOMETHING
189 * later when switching over to IEM.
190 */
191#ifndef HMVMX_USE_FUNCTION_TABLE
192typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
193#else
194typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
195#endif
196
197
198/*********************************************************************************************************************************
199* Internal Functions *
200*********************************************************************************************************************************/
201#ifndef HMVMX_USE_FUNCTION_TABLE
202DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
204# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
205#else
206# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
207# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
208#endif
209#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
210DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
211#endif
212
213static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
214
215/** @name VM-exit handler prototypes.
216 * @{
217 */
218static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
219static FNVMXEXITHANDLER vmxHCExitExtInt;
220static FNVMXEXITHANDLER vmxHCExitTripleFault;
221static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
222static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
223static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
224static FNVMXEXITHANDLER vmxHCExitCpuid;
225static FNVMXEXITHANDLER vmxHCExitGetsec;
226static FNVMXEXITHANDLER vmxHCExitHlt;
227static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
228static FNVMXEXITHANDLER vmxHCExitInvlpg;
229static FNVMXEXITHANDLER vmxHCExitRdpmc;
230static FNVMXEXITHANDLER vmxHCExitVmcall;
231#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
232static FNVMXEXITHANDLER vmxHCExitVmclear;
233static FNVMXEXITHANDLER vmxHCExitVmlaunch;
234static FNVMXEXITHANDLER vmxHCExitVmptrld;
235static FNVMXEXITHANDLER vmxHCExitVmptrst;
236static FNVMXEXITHANDLER vmxHCExitVmread;
237static FNVMXEXITHANDLER vmxHCExitVmresume;
238static FNVMXEXITHANDLER vmxHCExitVmwrite;
239static FNVMXEXITHANDLER vmxHCExitVmxoff;
240static FNVMXEXITHANDLER vmxHCExitVmxon;
241static FNVMXEXITHANDLER vmxHCExitInvvpid;
242# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
243static FNVMXEXITHANDLER vmxHCExitInvept;
244# endif
245#endif
246static FNVMXEXITHANDLER vmxHCExitRdtsc;
247static FNVMXEXITHANDLER vmxHCExitMovCRx;
248static FNVMXEXITHANDLER vmxHCExitMovDRx;
249static FNVMXEXITHANDLER vmxHCExitIoInstr;
250static FNVMXEXITHANDLER vmxHCExitRdmsr;
251static FNVMXEXITHANDLER vmxHCExitWrmsr;
252static FNVMXEXITHANDLER vmxHCExitMwait;
253static FNVMXEXITHANDLER vmxHCExitMtf;
254static FNVMXEXITHANDLER vmxHCExitMonitor;
255static FNVMXEXITHANDLER vmxHCExitPause;
256static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
257static FNVMXEXITHANDLER vmxHCExitApicAccess;
258static FNVMXEXITHANDLER vmxHCExitEptViolation;
259static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
260static FNVMXEXITHANDLER vmxHCExitRdtscp;
261static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
262static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
263static FNVMXEXITHANDLER vmxHCExitXsetbv;
264static FNVMXEXITHANDLER vmxHCExitInvpcid;
265#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
266static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
267#endif
268static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
269static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
270/** @} */
271
272#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
273/** @name Nested-guest VM-exit handler prototypes.
274 * @{
275 */
276static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
277static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
278static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
279static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
280static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
281static FNVMXEXITHANDLER vmxHCExitHltNested;
282static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
283static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
284static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
285static FNVMXEXITHANDLER vmxHCExitRdtscNested;
286static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
287static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
288static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
289static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
290static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
291static FNVMXEXITHANDLER vmxHCExitMwaitNested;
292static FNVMXEXITHANDLER vmxHCExitMtfNested;
293static FNVMXEXITHANDLER vmxHCExitMonitorNested;
294static FNVMXEXITHANDLER vmxHCExitPauseNested;
295static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
296static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
297static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
298static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
299static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
301static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
302static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
303static FNVMXEXITHANDLER vmxHCExitInstrNested;
304static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
305# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
306static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
307static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
308# endif
309/** @} */
310#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
311
312
313/*********************************************************************************************************************************
314* Global Variables *
315*********************************************************************************************************************************/
316#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
317/**
318 * Array of all VMCS fields.
319 * Any fields added to the VT-x spec. should be added here.
320 *
321 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
322 * of nested-guests.
323 */
324static const uint32_t g_aVmcsFields[] =
325{
326 /* 16-bit control fields. */
327 VMX_VMCS16_VPID,
328 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
329 VMX_VMCS16_EPTP_INDEX,
330 VMX_VMCS16_HLAT_PREFIX_SIZE,
331
332 /* 16-bit guest-state fields. */
333 VMX_VMCS16_GUEST_ES_SEL,
334 VMX_VMCS16_GUEST_CS_SEL,
335 VMX_VMCS16_GUEST_SS_SEL,
336 VMX_VMCS16_GUEST_DS_SEL,
337 VMX_VMCS16_GUEST_FS_SEL,
338 VMX_VMCS16_GUEST_GS_SEL,
339 VMX_VMCS16_GUEST_LDTR_SEL,
340 VMX_VMCS16_GUEST_TR_SEL,
341 VMX_VMCS16_GUEST_INTR_STATUS,
342 VMX_VMCS16_GUEST_PML_INDEX,
343
344 /* 16-bit host-state fields. */
345 VMX_VMCS16_HOST_ES_SEL,
346 VMX_VMCS16_HOST_CS_SEL,
347 VMX_VMCS16_HOST_SS_SEL,
348 VMX_VMCS16_HOST_DS_SEL,
349 VMX_VMCS16_HOST_FS_SEL,
350 VMX_VMCS16_HOST_GS_SEL,
351 VMX_VMCS16_HOST_TR_SEL,
352
353 /* 64-bit control fields. */
354 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
355 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
356 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
357 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
358 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
359 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
360 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
361 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
362 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
363 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
364 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
365 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
366 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
367 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
368 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
369 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
370 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
371 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
372 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
373 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
374 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
375 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
376 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
377 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
378 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
379 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
380 VMX_VMCS64_CTRL_EPTP_FULL,
381 VMX_VMCS64_CTRL_EPTP_HIGH,
382 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
383 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
384 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
385 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
386 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
387 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
388 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
389 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
390 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
391 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
392 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
393 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
394 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
395 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
396 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
397 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
398 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
399 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
400 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
401 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
402 VMX_VMCS64_CTRL_SPPTP_FULL,
403 VMX_VMCS64_CTRL_SPPTP_HIGH,
404 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
405 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
406 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
407 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
408 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
409 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
410 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_FULL,
411 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_HIGH,
412 VMX_VMCS64_CTRL_HLAT_PTR_FULL,
413 VMX_VMCS64_CTRL_HLAT_PTR_HIGH,
414 VMX_VMCS64_CTRL_EXIT2_FULL,
415 VMX_VMCS64_CTRL_EXIT2_HIGH,
416
417 /* 64-bit read-only data fields. */
418 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
419 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
420
421 /* 64-bit guest-state fields. */
422 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
423 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
424 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
425 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
426 VMX_VMCS64_GUEST_PAT_FULL,
427 VMX_VMCS64_GUEST_PAT_HIGH,
428 VMX_VMCS64_GUEST_EFER_FULL,
429 VMX_VMCS64_GUEST_EFER_HIGH,
430 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
431 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
432 VMX_VMCS64_GUEST_PDPTE0_FULL,
433 VMX_VMCS64_GUEST_PDPTE0_HIGH,
434 VMX_VMCS64_GUEST_PDPTE1_FULL,
435 VMX_VMCS64_GUEST_PDPTE1_HIGH,
436 VMX_VMCS64_GUEST_PDPTE2_FULL,
437 VMX_VMCS64_GUEST_PDPTE2_HIGH,
438 VMX_VMCS64_GUEST_PDPTE3_FULL,
439 VMX_VMCS64_GUEST_PDPTE3_HIGH,
440 VMX_VMCS64_GUEST_BNDCFGS_FULL,
441 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
442 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
443 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
444 VMX_VMCS64_GUEST_PKRS_FULL,
445 VMX_VMCS64_GUEST_PKRS_HIGH,
446
447 /* 64-bit host-state fields. */
448 VMX_VMCS64_HOST_PAT_FULL,
449 VMX_VMCS64_HOST_PAT_HIGH,
450 VMX_VMCS64_HOST_EFER_FULL,
451 VMX_VMCS64_HOST_EFER_HIGH,
452 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
453 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
454 VMX_VMCS64_HOST_PKRS_FULL,
455 VMX_VMCS64_HOST_PKRS_HIGH,
456
457 /* 32-bit control fields. */
458 VMX_VMCS32_CTRL_PIN_EXEC,
459 VMX_VMCS32_CTRL_PROC_EXEC,
460 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
461 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
462 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
463 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
464 VMX_VMCS32_CTRL_EXIT,
465 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
466 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
467 VMX_VMCS32_CTRL_ENTRY,
468 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
469 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
470 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
471 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
472 VMX_VMCS32_CTRL_TPR_THRESHOLD,
473 VMX_VMCS32_CTRL_PROC_EXEC2,
474 VMX_VMCS32_CTRL_PLE_GAP,
475 VMX_VMCS32_CTRL_PLE_WINDOW,
476
477 /* 32-bit read-only fields. */
478 VMX_VMCS32_RO_VM_INSTR_ERROR,
479 VMX_VMCS32_RO_EXIT_REASON,
480 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
481 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
482 VMX_VMCS32_RO_IDT_VECTORING_INFO,
483 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
484 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
485 VMX_VMCS32_RO_EXIT_INSTR_INFO,
486
487 /* 32-bit guest-state fields. */
488 VMX_VMCS32_GUEST_ES_LIMIT,
489 VMX_VMCS32_GUEST_CS_LIMIT,
490 VMX_VMCS32_GUEST_SS_LIMIT,
491 VMX_VMCS32_GUEST_DS_LIMIT,
492 VMX_VMCS32_GUEST_FS_LIMIT,
493 VMX_VMCS32_GUEST_GS_LIMIT,
494 VMX_VMCS32_GUEST_LDTR_LIMIT,
495 VMX_VMCS32_GUEST_TR_LIMIT,
496 VMX_VMCS32_GUEST_GDTR_LIMIT,
497 VMX_VMCS32_GUEST_IDTR_LIMIT,
498 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
499 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
500 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
501 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
502 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
503 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
504 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
505 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
506 VMX_VMCS32_GUEST_INT_STATE,
507 VMX_VMCS32_GUEST_ACTIVITY_STATE,
508 VMX_VMCS32_GUEST_SMBASE,
509 VMX_VMCS32_GUEST_SYSENTER_CS,
510 VMX_VMCS32_PREEMPT_TIMER_VALUE,
511
512 /* 32-bit host-state fields. */
513 VMX_VMCS32_HOST_SYSENTER_CS,
514
515 /* Natural-width control fields. */
516 VMX_VMCS_CTRL_CR0_MASK,
517 VMX_VMCS_CTRL_CR4_MASK,
518 VMX_VMCS_CTRL_CR0_READ_SHADOW,
519 VMX_VMCS_CTRL_CR4_READ_SHADOW,
520 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
521 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
522 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
523 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
524
525 /* Natural-width read-only data fields. */
526 VMX_VMCS_RO_EXIT_QUALIFICATION,
527 VMX_VMCS_RO_IO_RCX,
528 VMX_VMCS_RO_IO_RSI,
529 VMX_VMCS_RO_IO_RDI,
530 VMX_VMCS_RO_IO_RIP,
531 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
532
533 /* Natural-width guest-state fields. */
534 VMX_VMCS_GUEST_CR0,
535 VMX_VMCS_GUEST_CR3,
536 VMX_VMCS_GUEST_CR4,
537 VMX_VMCS_GUEST_ES_BASE,
538 VMX_VMCS_GUEST_CS_BASE,
539 VMX_VMCS_GUEST_SS_BASE,
540 VMX_VMCS_GUEST_DS_BASE,
541 VMX_VMCS_GUEST_FS_BASE,
542 VMX_VMCS_GUEST_GS_BASE,
543 VMX_VMCS_GUEST_LDTR_BASE,
544 VMX_VMCS_GUEST_TR_BASE,
545 VMX_VMCS_GUEST_GDTR_BASE,
546 VMX_VMCS_GUEST_IDTR_BASE,
547 VMX_VMCS_GUEST_DR7,
548 VMX_VMCS_GUEST_RSP,
549 VMX_VMCS_GUEST_RIP,
550 VMX_VMCS_GUEST_RFLAGS,
551 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
552 VMX_VMCS_GUEST_SYSENTER_ESP,
553 VMX_VMCS_GUEST_SYSENTER_EIP,
554 VMX_VMCS_GUEST_S_CET,
555 VMX_VMCS_GUEST_SSP,
556 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
557
558 /* Natural-width host-state fields. */
559 VMX_VMCS_HOST_CR0,
560 VMX_VMCS_HOST_CR3,
561 VMX_VMCS_HOST_CR4,
562 VMX_VMCS_HOST_FS_BASE,
563 VMX_VMCS_HOST_GS_BASE,
564 VMX_VMCS_HOST_TR_BASE,
565 VMX_VMCS_HOST_GDTR_BASE,
566 VMX_VMCS_HOST_IDTR_BASE,
567 VMX_VMCS_HOST_SYSENTER_ESP,
568 VMX_VMCS_HOST_SYSENTER_EIP,
569 VMX_VMCS_HOST_RSP,
570 VMX_VMCS_HOST_RIP,
571 VMX_VMCS_HOST_S_CET,
572 VMX_VMCS_HOST_SSP,
573 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
574};
575#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
576
577#ifdef HMVMX_USE_FUNCTION_TABLE
578/**
579 * VMX_EXIT dispatch table.
580 */
581static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
582{
583 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
584 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
585 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
586 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
587 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
588 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
589 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
590 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
591 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
592 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
593 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
594 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
595 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
596 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
597 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
598 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
599 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
600 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
601 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
602#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
603 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
604 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
605 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
606 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
607 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
608 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
609 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
610 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
611 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
612#else
613 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
614 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
615 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
616 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
617 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
618 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
619 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
620 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
621 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
622#endif
623 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
624 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
625 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
626 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
627 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
628 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
629 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
630 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
631 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
632 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
633 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
634 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
635 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
636 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
637 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
638 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
639 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
640 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
641 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
642 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
643 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
644 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
645#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
646 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
647#else
648 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
649#endif
650 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
651 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
652#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
653 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
654#else
655 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
656#endif
657 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
658 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
659 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
660 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
661 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
662 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
663 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
664 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
665 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
666 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
667 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
668 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
669 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
670 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
671 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
672 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
673};
674#endif /* HMVMX_USE_FUNCTION_TABLE */
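/** Illustrative dispatch sketch (assumed shape only; uExitReason stands in for the
 *  exit reason taken from the transient structure):
 * @code
 *     VBOXSTRICTRC rcStrict;
 *     if (RT_LIKELY(uExitReason <= VMX_EXIT_MAX))
 *         rcStrict = g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
 *     else
 *         rcStrict = vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
 * @endcode
 */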
675
676#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
677static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
678{
679 /* 0 */ "(Not Used)",
680 /* 1 */ "VMCALL executed in VMX root operation.",
681 /* 2 */ "VMCLEAR with invalid physical address.",
682 /* 3 */ "VMCLEAR with VMXON pointer.",
683 /* 4 */ "VMLAUNCH with non-clear VMCS.",
684 /* 5 */ "VMRESUME with non-launched VMCS.",
685 /* 6 */ "VMRESUME after VMXOFF.",
686 /* 7 */ "VM-entry with invalid control fields.",
687 /* 8 */ "VM-entry with invalid host state fields.",
688 /* 9 */ "VMPTRLD with invalid physical address.",
689 /* 10 */ "VMPTRLD with VMXON pointer.",
690 /* 11 */ "VMPTRLD with incorrect revision identifier.",
691 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
692 /* 13 */ "VMWRITE to read-only VMCS component.",
693 /* 14 */ "(Not Used)",
694 /* 15 */ "VMXON executed in VMX root operation.",
695 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
696 /* 17 */ "VM-entry with non-launched executing VMCS.",
697 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
698 /* 19 */ "VMCALL with non-clear VMCS.",
699 /* 20 */ "VMCALL with invalid VM-exit control fields.",
700 /* 21 */ "(Not Used)",
701 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
702 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
703 /* 24 */ "VMCALL with invalid SMM-monitor features.",
704 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
705 /* 26 */ "VM-entry with events blocked by MOV SS.",
706 /* 27 */ "(Not Used)",
707 /* 28 */ "Invalid operand to INVEPT/INVVPID."
708};
709#endif /* VBOX_STRICT && LOG_ENABLED */
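/** Illustrative lookup sketch: the clamp against HMVMX_INSTR_ERROR_MAX matches the
 *  table size above; uInstrError is a placeholder for the value read from
 *  VMX_VMCS32_RO_VM_INSTR_ERROR:
 * @code
 *     uint32_t const idxError = RT_MIN(uInstrError, HMVMX_INSTR_ERROR_MAX);
 *     Log4(("VM-instruction error %u: %s\n", uInstrError, g_apszVmxInstrErrors[idxError]));
 * @endcode
 */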
710
711
712/**
713 * Gets the CR0 guest/host mask.
714 *
715 * These bits typically do not change through the lifetime of a VM. Any bit set in
716 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
717 * by the guest.
718 *
719 * @returns The CR0 guest/host mask.
720 * @param pVCpu The cross context virtual CPU structure.
721 */
722static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
723{
724 /*
725 * Modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW) and
726 * to CR0 bits that we require for shadow paging (PG) by the guest must cause VM-exits.
727 *
728 * Furthermore, modifications to any bits that are reserved/unspecified currently
729 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
730 * when future CPUs specify and use currently reserved/unspecified bits.
731 */
732 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
733 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
734 * and @bugref{6944}. */
735 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
736 return ( X86_CR0_PE
737 | X86_CR0_NE
738 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
739 | X86_CR0_PG
740 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
741}
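/** Illustrative usage sketch: committing the mask to the CR0 guest/host mask field
 *  when configuring CR0 intercepts (VMX_VMCS_WRITE_NW is assumed to be the
 *  natural-width sibling of the write macros checked at the top of this file):
 * @code
 *     uint64_t const fCr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
 *     int const rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);
 *     AssertRC(rc);
 * @endcode
 */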
742
743
744/**
745 * Gets the CR4 guest/host mask.
746 *
747 * These bits typically do not change through the lifetime of a VM. Any bit set in
748 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
749 * by the guest.
750 *
751 * @returns The CR4 guest/host mask.
752 * @param pVCpu The cross context virtual CPU structure.
753 */
754static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
755{
756 /*
757 * We construct a mask of all CR4 bits that the guest can modify without causing
758 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
759 * a VM-exit when the guest attempts to modify them when executing using
760 * hardware-assisted VMX.
761 *
762 * When a feature is not exposed to the guest (and may be present on the host),
763 * we want to intercept guest modifications to the bit so we can emulate proper
764 * behavior (e.g., #GP).
765 *
766 * Furthermore, only modifications to those bits that don't require immediate
767 * emulation are allowed. For example, PCIDE is excluded because the behavior
768 * depends on CR3 which might not always be the guest value while executing
769 * using hardware-assisted VMX.
770 */
771 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
772 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
773#ifdef IN_NEM_DARWIN
774 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
775#endif
776 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
777
778 /*
779 * Paranoia.
780 * Ensure features exposed to the guest are present on the host.
781 */
782 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
783#ifdef IN_NEM_DARWIN
784 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
785#endif
786 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
787
788 uint64_t const fGstMask = X86_CR4_PVI
789 | X86_CR4_TSD
790 | X86_CR4_DE
791 | X86_CR4_MCE
792 | X86_CR4_PCE
793 | X86_CR4_OSXMMEEXCPT
794 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
795#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
796 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
797 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
798#endif
799 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
800 return ~fGstMask;
801}
802
803
804/**
805 * Adds one or more exceptions to the exception bitmap and commits it to the current
806 * VMCS.
807 *
808 * @param pVCpu The cross context virtual CPU structure.
809 * @param pVmxTransient The VMX-transient structure.
810 * @param uXcptMask The exception(s) to add.
811 */
812static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
813{
814 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
815 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
816 if ((uXcptBitmap & uXcptMask) != uXcptMask)
817 {
818 uXcptBitmap |= uXcptMask;
819 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
820 AssertRC(rc);
821 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
822 }
823}
824
825
826/**
827 * Adds an exception to the exception bitmap and commits it to the current VMCS.
828 *
829 * @param pVCpu The cross context virtual CPU structure.
830 * @param pVmxTransient The VMX-transient structure.
831 * @param uXcpt The exception to add.
832 */
833static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
834{
835 Assert(uXcpt <= X86_XCPT_LAST);
836 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
837}
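/** Illustrative usage sketch: adding a single intercept, e.g. for \#GP:
 * @code
 *     vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 * @endcode
 */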
838
839
840/**
841 * Removes one or more exceptions from the exception bitmap and commits it to the
842 * current VMCS.
843 *
844 * This takes care of not removing the exception intercept if a nested-guest
845 * requires the exception to be intercepted.
846 *
847 * @returns VBox status code.
848 * @param pVCpu The cross context virtual CPU structure.
849 * @param pVmxTransient The VMX-transient structure.
850 * @param uXcptMask The exception(s) to remove.
851 */
852static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
853{
854 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
855 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
856 if (u32XcptBitmap & uXcptMask)
857 {
858#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
859 if (!pVmxTransient->fIsNestedGuest)
860 { /* likely */ }
861 else
862 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
863#endif
864#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
865 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
866 | RT_BIT(X86_XCPT_DE)
867 | RT_BIT(X86_XCPT_NM)
868 | RT_BIT(X86_XCPT_TS)
869 | RT_BIT(X86_XCPT_UD)
870 | RT_BIT(X86_XCPT_NP)
871 | RT_BIT(X86_XCPT_SS)
872 | RT_BIT(X86_XCPT_GP)
873 | RT_BIT(X86_XCPT_PF)
874 | RT_BIT(X86_XCPT_MF));
875#elif defined(HMVMX_ALWAYS_TRAP_PF)
876 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
877#endif
878 if (uXcptMask)
879 {
880 /* Validate we are not removing any essential exception intercepts. */
881#ifndef IN_NEM_DARWIN
882 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
883#else
884 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
885#endif
886 NOREF(pVCpu);
887 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
888 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
889
890 /* Remove it from the exception bitmap. */
891 u32XcptBitmap &= ~uXcptMask;
892
893 /* Commit and update the cache if necessary. */
894 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
895 {
896 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
897 AssertRC(rc);
898 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
899 }
900 }
901 }
902 return VINF_SUCCESS;
903}
904
905
906/**
907 * Removes an exception from the exception bitmap and commits it to the current
908 * VMCS.
909 *
910 * @returns VBox status code.
911 * @param pVCpu The cross context virtual CPU structure.
912 * @param pVmxTransient The VMX-transient structure.
913 * @param uXcpt The exception to remove.
914 */
915static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
916{
917 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
918}
919
920#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
921
922/**
923 * Loads the shadow VMCS specified by the VMCS info. object.
924 *
925 * @returns VBox status code.
926 * @param pVmcsInfo The VMCS info. object.
927 *
928 * @remarks Can be called with interrupts disabled.
929 */
930static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
931{
932 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
933 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
934
935 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
936 if (RT_SUCCESS(rc))
937 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
938 return rc;
939}
940
941
942/**
943 * Clears the shadow VMCS specified by the VMCS info. object.
944 *
945 * @returns VBox status code.
946 * @param pVmcsInfo The VMCS info. object.
947 *
948 * @remarks Can be called with interrupts disabled.
949 */
950static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
951{
952 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
953 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
954
955 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
956 if (RT_SUCCESS(rc))
957 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
958 return rc;
959}
960
961
962/**
963 * Switches from and to the specified VMCSes.
964 *
965 * @returns VBox status code.
966 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
967 * @param pVmcsInfoTo The VMCS info. object we are switching to.
968 *
969 * @remarks Called with interrupts disabled.
970 */
971static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
972{
973 /*
974 * Clear the VMCS we are switching out if it has not already been cleared.
975 * This will sync any CPU internal data back to the VMCS.
976 */
977 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
978 {
979 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
980 if (RT_SUCCESS(rc))
981 {
982 /*
983 * The shadow VMCS, if any, would not be active at this point since we
984 * would have cleared it while importing the virtual hardware-virtualization
985 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
986 * clear the shadow VMCS here, just assert for safety.
987 */
988 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
989 }
990 else
991 return rc;
992 }
993
994 /*
995 * Clear the VMCS we are switching to if it has not already been cleared.
996 * This will initialize the VMCS launch state to "clear" required for loading it.
997 *
998 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
999 */
1000 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1001 {
1002 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1003 if (RT_SUCCESS(rc))
1004 { /* likely */ }
1005 else
1006 return rc;
1007 }
1008
1009 /*
1010 * Finally, load the VMCS we are switching to.
1011 */
1012 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1013}
1014
1015
1016/**
1017 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1018 * caller.
1019 *
1020 * @returns VBox status code.
1021 * @param pVCpu The cross context virtual CPU structure.
1022 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1023 * true) or guest VMCS (pass false).
1024 */
1025static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1026{
1027 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1028 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1029
1030 PVMXVMCSINFO pVmcsInfoFrom;
1031 PVMXVMCSINFO pVmcsInfoTo;
1032 if (fSwitchToNstGstVmcs)
1033 {
1034 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1035 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1036 }
1037 else
1038 {
1039 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1040 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1041 }
1042
1043 /*
1044 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1045 * preemption hook code path acquires the current VMCS.
1046 */
1047 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1048
1049 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1050 if (RT_SUCCESS(rc))
1051 {
1052 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1053 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1054
1055 /*
1056 * If we are switching to a VMCS that was executed on a different host CPU or was
1057 * never executed before, flag that we need to export the host state before executing
1058 * guest/nested-guest code using hardware-assisted VMX.
1059 *
1060 * This could probably be done in a preemptible context since the preemption hook
1061 * will flag the necessary change in host context. However, since preemption is
1062 * already disabled and to avoid making assumptions about host specific code in
1063 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1064 * disabled.
1065 */
1066 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1067 { /* likely */ }
1068 else
1069 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1070
1071 ASMSetFlags(fEFlags);
1072
1073 /*
1074 * We use different VM-exit MSR-store areas for the guest and the nested-guest. Hence,
1075 * flag that we need to update the host MSR values there. Even if we decide in the
1076 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1077 * if its content differs, we would have to update the host MSRs anyway.
1078 */
1079 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1080 }
1081 else
1082 ASMSetFlags(fEFlags);
1083 return rc;
1084}
1085
1086#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1087#ifdef VBOX_STRICT
1088
1089/**
1090 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1091 * transient structure.
1092 *
1093 * @param pVCpu The cross context virtual CPU structure.
1094 * @param pVmxTransient The VMX-transient structure.
1095 */
1096DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1097{
1098 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1099 AssertRC(rc);
1100}
1101
1102
1103/**
1104 * Reads the VM-entry exception error code field from the VMCS into
1105 * the VMX transient structure.
1106 *
1107 * @param pVCpu The cross context virtual CPU structure.
1108 * @param pVmxTransient The VMX-transient structure.
1109 */
1110DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1111{
1112 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1113 AssertRC(rc);
1114}
1115
1116
1117/**
1118 * Reads the VM-entry instruction length field from the VMCS into
1119 * the VMX transient structure.
1120 *
1121 * @param pVCpu The cross context virtual CPU structure.
1122 * @param pVmxTransient The VMX-transient structure.
1123 */
1124DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1125{
1126 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1127 AssertRC(rc);
1128}
1129
1130#endif /* VBOX_STRICT */
1131
1132
1133/**
1134 * Reads VMCS fields into the VMXTRANSIENT structure, slow path version.
1135 *
1136 * Don't call this directly unless it's likely that some or all of the fields
1137 * given in @a a_fReadMask have already been read.
1138 *
1139 * @tparam a_fReadMask The fields to read.
1140 * @param pVCpu The cross context virtual CPU structure.
1141 * @param pVmxTransient The VMX-transient structure.
1142 */
1143template<uint32_t const a_fReadMask>
1144static void vmxHCReadToTransientSlow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1145{
1146 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1147 | HMVMX_READ_EXIT_INSTR_LEN
1148 | HMVMX_READ_EXIT_INSTR_INFO
1149 | HMVMX_READ_IDT_VECTORING_INFO
1150 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1151 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1152 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1153 | HMVMX_READ_GUEST_LINEAR_ADDR
1154 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1155 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1156 )) == 0);
1157
1158 if ((pVmxTransient->fVmcsFieldsRead & a_fReadMask) != a_fReadMask)
1159 {
1160 uint32_t const fVmcsFieldsRead = pVmxTransient->fVmcsFieldsRead;
1161
1162 if ( (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1163 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1164 {
1165 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1166 AssertRC(rc);
1167 }
1168 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1169 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1170 {
1171 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1172 AssertRC(rc);
1173 }
1174 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1175 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1176 {
1177 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1178 AssertRC(rc);
1179 }
1180 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1181 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1182 {
1183 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1184 AssertRC(rc);
1185 }
1186 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1187 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1188 {
1189 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1190 AssertRC(rc);
1191 }
1192 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1193 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1194 {
1195 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1196 AssertRC(rc);
1197 }
1198 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1199 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1200 {
1201 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1202 AssertRC(rc);
1203 }
1204 if ( (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1205 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1206 {
1207 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1208 AssertRC(rc);
1209 }
1210 if ( (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1211 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1212 {
1213 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1214 AssertRC(rc);
1215 }
1216 if ( (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1217 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1218 {
1219 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1220 AssertRC(rc);
1221 }
1222
1223 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1224 }
1225}
1226
1227
1228/**
1229 * Reads VMCS fields into the VMXTRANSIENT structure.
1230 *
1231 * This optimizes for the case where none of @a a_fReadMask has been read yet,
1232 * generating an optimized read sequence without any conditionals in
1233 * non-strict builds.
1234 *
1235 * @tparam a_fReadMask The fields to read. One or more of the
1236 * HMVMX_READ_XXX fields ORed together.
1237 * @param pVCpu The cross context virtual CPU structure.
1238 * @param pVmxTransient The VMX-transient structure.
1239 */
1240template<uint32_t const a_fReadMask>
1241DECLINLINE(void) vmxHCReadToTransient(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1242{
1243 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1244 | HMVMX_READ_EXIT_INSTR_LEN
1245 | HMVMX_READ_EXIT_INSTR_INFO
1246 | HMVMX_READ_IDT_VECTORING_INFO
1247 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1248 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1249 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1250 | HMVMX_READ_GUEST_LINEAR_ADDR
1251 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1252 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1253 )) == 0);
1254
1255 if (RT_LIKELY(!(pVmxTransient->fVmcsFieldsRead & a_fReadMask)))
1256 {
1257 if (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1258 {
1259 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1260 AssertRC(rc);
1261 }
1262 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1263 {
1264 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1265 AssertRC(rc);
1266 }
1267 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1268 {
1269 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1270 AssertRC(rc);
1271 }
1272 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1273 {
1274 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1275 AssertRC(rc);
1276 }
1277 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1278 {
1279 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1280 AssertRC(rc);
1281 }
1282 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1283 {
1284 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1285 AssertRC(rc);
1286 }
1287 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1288 {
1289 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1290 AssertRC(rc);
1291 }
1292 if (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1293 {
1294 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1295 AssertRC(rc);
1296 }
1297 if (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1298 {
1299 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1300 AssertRC(rc);
1301 }
1302 if (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1303 {
1304 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1305 AssertRC(rc);
1306 }
1307
1308 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1309 }
1310 else
1311 {
1312 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatReadToTransientFallback);
1313 Log11Func(("a_fReadMask=%#x fVmcsFieldsRead=%#x => %#x - Taking inefficient code path!\n",
1314 a_fReadMask, pVmxTransient->fVmcsFieldsRead, a_fReadMask & pVmxTransient->fVmcsFieldsRead));
1315 vmxHCReadToTransientSlow<a_fReadMask>(pVCpu, pVmxTransient);
1316 }
1317}
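/** Illustrative usage sketch: a handler needing the exit qualification and the
 *  instruction length would instantiate the template like this (the flag choice is
 *  just an example):
 * @code
 *     vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
 *                          | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
 * @endcode
 */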
1318
1319
1320#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1321/**
1322 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1323 *
1324 * @param pVCpu The cross context virtual CPU structure.
1325 * @param pVmxTransient The VMX-transient structure.
1326 */
1327static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1328{
1329 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1330 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1331 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1332 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1333 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1334 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1335 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1336 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1337 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1338 AssertRC(rc);
1339 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1340 | HMVMX_READ_EXIT_INSTR_LEN
1341 | HMVMX_READ_EXIT_INSTR_INFO
1342 | HMVMX_READ_IDT_VECTORING_INFO
1343 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1344 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1345 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1346 | HMVMX_READ_GUEST_LINEAR_ADDR
1347 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1348}
1349#endif
1350
1351/**
1352 * Verifies that our cached values of the VMCS fields are all consistent with
1353 * what's actually present in the VMCS.
1354 *
1355 * @returns VBox status code.
1356 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1357 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1358 * VMCS content. HMCPU error-field is
1359 * updated, see VMX_VCI_XXX.
1360 * @param pVCpu The cross context virtual CPU structure.
1361 * @param pVmcsInfo The VMCS info. object.
1362 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1363 */
1364static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1365{
1366 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1367
1368 uint32_t u32Val;
1369 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1370 AssertRC(rc);
1371 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1372 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1373 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1374 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1375
1376 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1377 AssertRC(rc);
1378 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1379 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1380 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1381 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1382
1383 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1384 AssertRC(rc);
1385 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1386 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1387 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1388 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1389
1390 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1391 AssertRC(rc);
1392 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1393 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1394 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1395 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1396
1397 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1398 {
1399 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1400 AssertRC(rc);
1401 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1402 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1403 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1404 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1405 }
1406
1407 uint64_t u64Val;
1408 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1409 {
1410 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1411 AssertRC(rc);
1412 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1413 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1414 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1415 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1416 }
1417
1418 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1419 AssertRC(rc);
1420 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1421 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1422 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1423 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1424
1425 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1426 AssertRC(rc);
1427 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1428 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1429 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1430 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1431
1432 NOREF(pcszVmcs);
1433 return VINF_SUCCESS;
1434}
1435
1436
1437/**
1438 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1439 * VMCS.
1440 *
1441 * This is typically required when the guest changes paging mode.
1442 *
1443 * @returns VBox status code.
1444 * @param pVCpu The cross context virtual CPU structure.
1445 * @param pVmxTransient The VMX-transient structure.
1446 *
1447 * @remarks Requires EFER.
1448 * @remarks No-long-jump zone!!!
1449 */
1450static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1451{
1452 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1453 {
1454 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1455 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1456
1457 /*
1458 * VM-entry controls.
1459 */
1460 {
1461 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1462 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
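            /* Illustrative (made-up) numbers: if allowed0 were 0x000011ff and allowed1 were
               0x0009ffff, every allowed0 bit must end up set in fVal, and any fVal bit outside
               allowed1 would make the (fVal & fZap) == fVal sanity check below fail. */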
1463
1464 /*
1465 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1466 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1467 *
1468 * For nested-guests, this is a mandatory VM-entry control. It's also
1469 * required because we do not want to leak host bits to the nested-guest.
1470 */
1471 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1472
1473 /*
1474 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1475 *
1476 * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1477 * required to get the nested-guest working with hardware-assisted VMX execution.
1478 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1479 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1480 * here rather than while merging the guest VMCS controls.
1481 */
1482 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1483 {
1484 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1485 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1486 }
1487 else
1488 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1489
1490 /*
1491 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1492 *
1493 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1494 * regardless of whether the nested-guest VMCS specifies it because we are free to
1495 * load whatever MSRs we require and we do not need to modify the guest visible copy
1496 * of the VM-entry MSR load area.
1497 */
1498 if ( g_fHmVmxSupportsVmcsEfer
1499#ifndef IN_NEM_DARWIN
1500 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1501#endif
1502 )
1503 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1504 else
1505 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1506
1507 /*
1508 * The following should -not- be set (since we're not in SMM mode):
1509 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1510 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1511 */
1512
1513 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1514 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1515
1516 if ((fVal & fZap) == fVal)
1517 { /* likely */ }
1518 else
1519 {
1520 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1521 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1522 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1523 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1524 }
1525
1526 /* Commit it to the VMCS. */
1527 if (pVmcsInfo->u32EntryCtls != fVal)
1528 {
1529 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1530 AssertRC(rc);
1531 pVmcsInfo->u32EntryCtls = fVal;
1532 }
1533 }
1534
1535 /*
1536 * VM-exit controls.
1537 */
1538 {
1539 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1540 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1541
1542 /*
1543 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1544 * supported the 1-setting of this bit.
1545 *
1546 * For nested-guests, we set the "save debug controls" as the converse
1547 * "load debug controls" is mandatory for nested-guests anyway.
1548 */
1549 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1550
1551 /*
1552 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1553 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1554 * host EFER.LMA and EFER.LME bits to this value. See assertion in
1555 * vmxHCExportHostMsrs().
1556 *
1557 * For nested-guests, we always set this bit as we do not support 32-bit
1558 * hosts.
1559 */
1560 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1561
1562#ifndef IN_NEM_DARWIN
1563 /*
1564 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1565 *
1566 * For nested-guests, we should use the "save IA32_EFER" control if we also
1567 * used the "load IA32_EFER" control while exporting VM-entry controls.
1568 */
1569 if ( g_fHmVmxSupportsVmcsEfer
1570 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1571 {
1572 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1573 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1574 }
1575#endif
1576
1577 /*
1578 * Enable saving of the VMX-preemption timer value on VM-exit.
1579 * For nested-guests, currently not exposed/used.
1580 */
1581 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1582 * the timer value. */
1583 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1584 {
1585 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1586 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1587 }
1588
1589 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1590 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1591
1592 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1593 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1594 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1595
1596 if ((fVal & fZap) == fVal)
1597 { /* likely */ }
1598 else
1599 {
1600 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1601 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1602 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1603 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1604 }
1605
1606 /* Commit it to the VMCS. */
1607 if (pVmcsInfo->u32ExitCtls != fVal)
1608 {
1609 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1610 AssertRC(rc);
1611 pVmcsInfo->u32ExitCtls = fVal;
1612 }
1613 }
1614
1615 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1616 }
1617 return VINF_SUCCESS;
1618}
1619
1620
1621/**
1622 * Sets the TPR threshold in the VMCS.
1623 *
1624 * @param pVCpu The cross context virtual CPU structure.
1625 * @param pVmcsInfo The VMCS info. object.
1626 * @param u32TprThreshold The TPR threshold (task-priority class only).
1627 */
1628DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1629{
1630 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1631 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1632 RT_NOREF(pVmcsInfo);
1633 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1634 AssertRC(rc);
1635}
1636
1637
1638/**
1639 * Exports the guest APIC TPR state into the VMCS.
1640 *
1641 * @param pVCpu The cross context virtual CPU structure.
1642 * @param pVmxTransient The VMX-transient structure.
1643 *
1644 * @remarks No-long-jump zone!!!
1645 */
1646static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1647{
1648 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1649 {
1650 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1651
1652 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1653 if (!pVmxTransient->fIsNestedGuest)
1654 {
1655 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1656 && APICIsEnabled(pVCpu))
1657 {
1658 /*
1659 * Setup TPR shadowing.
1660 */
1661 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1662 {
1663 bool fPendingIntr = false;
1664 uint8_t u8Tpr = 0;
1665 uint8_t u8PendingIntr = 0;
1666 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1667 AssertRC(rc);
1668
1669 /*
1670 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1671 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1672 * priority of the pending interrupt so we can deliver the interrupt. If there
1673 * are no interrupts pending, set threshold to 0 to not cause any
1674 * TPR-below-threshold VM-exits.
1675 */
1676 uint32_t u32TprThreshold = 0;
1677 if (fPendingIntr)
1678 {
1679 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1680 (which is the Task-Priority Class). */
1681 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1682 const uint8_t u8TprPriority = u8Tpr >> 4;
1683 if (u8PendingPriority <= u8TprPriority)
1684 u32TprThreshold = u8PendingPriority;
1685 }
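                    /* Worked example (illustrative): a pending vector 0x53 has priority class 5;
                       with the guest TPR at 0x60 (class 6) it is masked, so the threshold becomes 5
                       and VT-x exits once the guest lowers TPR bits 7:4 below 5, i.e. exactly when
                       the pending interrupt becomes deliverable. */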
1686
1687 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1688 }
1689 }
1690 }
1691 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1692 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1693 }
1694}
1695
1696
1697/**
1698 * Gets the guest interruptibility-state and updates related force-flags.
1699 *
1700 * @returns Guest's interruptibility-state.
1701 * @param pVCpu The cross context virtual CPU structure.
1702 *
1703 * @remarks No-long-jump zone!!!
1704 */
1705static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1706{
1707 uint32_t fIntrState;
1708
1709 /*
1710 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1711 */
1712 if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
1713 fIntrState = 0;
1714 else
1715 {
1716 /* If inhibition is active, RIP should've been imported from the VMCS already. */
1717 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1718
1719 if (CPUMIsInInterruptShadowAfterSs(&pVCpu->cpum.GstCtx))
1720 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1721 else
1722 {
1723 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1724
1725 /* Block-by-STI must not be set when interrupts are disabled. */
1726 AssertStmt(pVCpu->cpum.GstCtx.eflags.Bits.u1IF, fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
1727 }
1728 }
1729
1730 /*
1731 * Check if we should inhibit NMI delivery.
1732 */
1733 if (!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1734 { /* likely */ }
1735 else
1736 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1737
1738 /*
1739 * Validate.
1740 */
1741 /* We don't support block-by-SMI yet. */
1742 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
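    /* For reference (per the Intel SDM), the interruptibility-state bits returned here are:
       bit 0 = blocking by STI, bit 1 = blocking by MOV SS, bit 2 = blocking by SMI,
       bit 3 = blocking by NMI. */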
1743
1744 return fIntrState;
1745}
1746
1747
1748/**
1749 * Exports the exception intercepts required for guest execution in the VMCS.
1750 *
1751 * @param pVCpu The cross context virtual CPU structure.
1752 * @param pVmxTransient The VMX-transient structure.
1753 *
1754 * @remarks No-long-jump zone!!!
1755 */
1756static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1757{
1758 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1759 {
1760 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1761 if ( !pVmxTransient->fIsNestedGuest
1762 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1763 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1764 else
1765 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1766
1767 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1768 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1769 }
1770}
1771
1772
1773/**
1774 * Exports the guest's RIP into the guest-state area in the VMCS.
1775 *
1776 * @param pVCpu The cross context virtual CPU structure.
1777 *
1778 * @remarks No-long-jump zone!!!
1779 */
1780static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1781{
1782 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1783 {
1784 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1785
1786 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1787 AssertRC(rc);
1788
1789 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1790 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1791 }
1792}
1793
1794
1795/**
1796 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1797 *
1798 * @param pVCpu The cross context virtual CPU structure.
1799 * @param pVmxTransient The VMX-transient structure.
1800 *
1801 * @remarks No-long-jump zone!!!
1802 */
1803static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1804{
1805 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1806 {
1807 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1808
1809 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits
1810 of RFLAGS are reserved (MBZ). We use bits 63:24 for internal purposes, so no need
1811 to assert this, the CPUMX86EFLAGS/CPUMX86RFLAGS union masks these off for us.
1812 Use 32-bit VMWRITE. */
1813 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
1814 Assert((fEFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
1815 AssertMsg(!(fEFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK)), ("%#x\n", fEFlags));
1816
1817#ifndef IN_NEM_DARWIN
1818 /*
1819 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1820 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1821 * can run the real-mode guest code under Virtual 8086 mode.
1822 */
1823 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1824 if (pVmcsInfo->RealMode.fRealOnV86Active)
1825 {
1826 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1827 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1828 Assert(!pVmxTransient->fIsNestedGuest);
1829 pVmcsInfo->RealMode.Eflags.u32 = fEFlags; /* Save the original eflags of the real-mode guest. */
1830 fEFlags |= X86_EFL_VM; /* Set the Virtual 8086 mode bit. */
1831 fEFlags &= ~X86_EFL_IOPL; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1832 }
1833#else
1834 RT_NOREF(pVmxTransient);
1835#endif
1836
1837 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags);
1838 AssertRC(rc);
1839
1840 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1841 Log4Func(("eflags=%#RX32\n", fEFlags));
1842 }
1843}
1844
1845
1846#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1847/**
1848 * Copies the nested-guest VMCS to the shadow VMCS.
1849 *
1850 * @returns VBox status code.
1851 * @param pVCpu The cross context virtual CPU structure.
1852 * @param pVmcsInfo The VMCS info. object.
1853 *
1854 * @remarks No-long-jump zone!!!
1855 */
1856static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1857{
1858 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1859 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1860
1861 /*
1862 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1863 * current VMCS, as we may try saving guest lazy MSRs.
1864 *
1865 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1866 * calling the import VMCS code which is currently performing the guest MSR reads
1867 * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
1868 * and the rest of the VMX leave session machinery.
1869 */
1870 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1871
1872 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1873 if (RT_SUCCESS(rc))
1874 {
1875 /*
1876 * Copy all guest read/write VMCS fields.
1877 *
1878 * We don't check for VMWRITE failures here for performance reasons and
1879 * because they are not expected to fail, barring irrecoverable conditions
1880 * like hardware errors.
1881 */
1882 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1883 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1884 {
1885 uint64_t u64Val;
1886 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1887 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1888 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1889 }
1890
1891 /*
1892 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1893 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1894 */
1895 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1896 {
1897 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1898 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1899 {
1900 uint64_t u64Val;
1901 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1902 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1903 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1904 }
1905 }
1906
1907 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1908 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1909 }
1910
1911 ASMSetFlags(fEFlags);
1912 return rc;
1913}
1914
1915
1916/**
1917 * Copies the shadow VMCS to the nested-guest VMCS.
1918 *
1919 * @returns VBox status code.
1920 * @param pVCpu The cross context virtual CPU structure.
1921 * @param pVmcsInfo The VMCS info. object.
1922 *
1923 * @remarks Called with interrupts disabled.
1924 */
1925static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1926{
1927 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1928 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1929 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1930
1931 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1932 if (RT_SUCCESS(rc))
1933 {
1934 /*
1935 * Copy guest read/write fields from the shadow VMCS.
1936 * Guest read-only fields cannot be modified, so no need to copy them.
1937 *
1938 * We don't check for VMREAD failures here for performance reasons and
1939 * because they are not expected to fail, barring irrecoverable conditions
1940 * like hardware errors.
1941 */
1942 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1943 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1944 {
1945 uint64_t u64Val;
1946 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1947 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1948 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1949 }
1950
1951 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1952 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1953 }
1954 return rc;
1955}
1956
1957
1958/**
1959 * Enables VMCS shadowing for the given VMCS info. object.
1960 *
1961 * @param pVCpu The cross context virtual CPU structure.
1962 * @param pVmcsInfo The VMCS info. object.
1963 *
1964 * @remarks No-long-jump zone!!!
1965 */
1966static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1967{
1968 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1969 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1970 {
1971 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1972 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1973 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1974 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1975 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1976 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
1977 Log4Func(("Enabled\n"));
1978 }
1979}
1980
1981
1982/**
1983 * Disables VMCS shadowing for the given VMCS info. object.
1984 *
1985 * @param pVCpu The cross context virtual CPU structure.
1986 * @param pVmcsInfo The VMCS info. object.
1987 *
1988 * @remarks No-long-jump zone!!!
1989 */
1990static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1991{
1992 /*
1993 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
1994 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
1995 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
1996 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
1997 *
1998 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
1999 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2000 */
2001 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2002 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2003 {
2004 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2005 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2006 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2007 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2008 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2009 Log4Func(("Disabled\n"));
2010 }
2011}
2012#endif
2013
2014
2015/**
2016 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2017 *
2018 * The guest FPU state is always pre-loaded, hence we don't need to bother with
2019 * sharing FPU-related CR0 bits between the guest and host.
2020 *
2021 * @returns VBox status code.
2022 * @param pVCpu The cross context virtual CPU structure.
2023 * @param pVmxTransient The VMX-transient structure.
2024 *
2025 * @remarks No-long-jump zone!!!
2026 */
2027static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2028{
2029 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2030 {
2031 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2032 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2033
2034 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2035 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2036 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2037 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2038 else
2039 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
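        /* On typical hardware IA32_VMX_CR0_FIXED0 is 0x80000021 (PG, NE, PE) and
           IA32_VMX_CR0_FIXED1 is 0xffffffff; with unrestricted guest execution the PE/PG
           requirement is dropped above, leaving NE as the only mandatory bit.
           (These are commonly observed values, not guaranteed ones.) */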
2040
2041 if (!pVmxTransient->fIsNestedGuest)
2042 {
2043 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2044 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2045 uint64_t const u64ShadowCr0 = u64GuestCr0;
2046 Assert(!RT_HI_U32(u64GuestCr0));
2047
2048 /*
2049 * Setup VT-x's view of the guest CR0.
2050 */
2051 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2052 if (VM_IS_VMX_NESTED_PAGING(pVM))
2053 {
2054#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2055 if (CPUMIsGuestPagingEnabled(pVCpu))
2056 {
2057 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2058 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2059 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2060 }
2061 else
2062 {
2063 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2064 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2065 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2066 }
2067
2068 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2069 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2070 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2071#endif
2072 }
2073 else
2074 {
2075 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2076 u64GuestCr0 |= X86_CR0_WP;
2077 }
2078
2079 /*
2080 * Guest FPU bits.
2081 *
2082 * Since we always pre-load the guest FPU before VM-entry, there is no need to track lazy state
2083 * using CR0.TS.
2084 *
2085 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2086 * set on the first CPUs to support VT-x; nothing is said about it with regards to UX (unrestricted guest) in the VM-entry checks.
2087 */
2088 u64GuestCr0 |= X86_CR0_NE;
2089
2090 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2091 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2092
2093 /*
2094 * Update exception intercepts.
2095 */
2096 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2097#ifndef IN_NEM_DARWIN
2098 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2099 {
2100 Assert(PDMVmmDevHeapIsEnabled(pVM));
2101 Assert(pVM->hm.s.vmx.pRealModeTSS);
2102 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2103 }
2104 else
2105#endif
2106 {
2107 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2108 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2109 if (fInterceptMF)
2110 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2111 }
2112
2113 /* Additional intercepts for debugging, define these yourself explicitly. */
2114#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2115 uXcptBitmap |= 0
2116 | RT_BIT(X86_XCPT_BP)
2117 | RT_BIT(X86_XCPT_DE)
2118 | RT_BIT(X86_XCPT_NM)
2119 | RT_BIT(X86_XCPT_TS)
2120 | RT_BIT(X86_XCPT_UD)
2121 | RT_BIT(X86_XCPT_NP)
2122 | RT_BIT(X86_XCPT_SS)
2123 | RT_BIT(X86_XCPT_GP)
2124 | RT_BIT(X86_XCPT_PF)
2125 | RT_BIT(X86_XCPT_MF)
2126 ;
2127#elif defined(HMVMX_ALWAYS_TRAP_PF)
2128 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2129#endif
2130 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2131 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2132 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2133 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2134 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2135
2136 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2137 u64GuestCr0 |= fSetCr0;
2138 u64GuestCr0 &= fZapCr0;
2139 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2140
2141 /* Commit the CR0 and related fields to the guest VMCS. */
2142 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2143 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2144 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2145 {
2146 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2147 AssertRC(rc);
2148 }
2149 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2150 {
2151 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2152 AssertRC(rc);
2153 }
2154
2155 /* Update our caches. */
2156 pVmcsInfo->u32ProcCtls = uProcCtls;
2157 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2158
2159 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2160 }
2161 else
2162 {
2163 /*
2164 * With nested-guests, we may have extended the guest/host mask here since we
2165 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2166 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2167 * originally supplied. We must copy those bits from the nested-guest CR0 into
2168 * the nested-guest CR0 read-shadow.
2169 */
2170 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2171 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2172 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2173 Assert(!RT_HI_U32(u64GuestCr0));
2174 Assert(u64GuestCr0 & X86_CR0_NE);
2175
2176 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2177 u64GuestCr0 |= fSetCr0;
2178 u64GuestCr0 &= fZapCr0;
2179 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2180
2181 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2182 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2183 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2184
2185 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2186 }
2187
2188 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2189 }
2190
2191 return VINF_SUCCESS;
2192}
2193
2194
2195/**
2196 * Exports the guest control registers (CR3, CR4) into the guest-state area
2197 * in the VMCS.
2198 *
2199 * @returns VBox strict status code.
2200 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2201 * without unrestricted guest access and the VMMDev is not presently
2202 * mapped (e.g. EFI32).
2203 *
2204 * @param pVCpu The cross context virtual CPU structure.
2205 * @param pVmxTransient The VMX-transient structure.
2206 *
2207 * @remarks No-long-jump zone!!!
2208 */
2209static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2210{
2211 int rc = VINF_SUCCESS;
2212 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2213
2214 /*
2215 * Guest CR2.
2216 * It's always loaded in the assembler code. Nothing to do here.
2217 */
2218
2219 /*
2220 * Guest CR3.
2221 */
2222 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2223 {
2224 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2225
2226 if (VM_IS_VMX_NESTED_PAGING(pVM))
2227 {
2228#ifndef IN_NEM_DARWIN
2229 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2230 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2231
2232 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2233 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2234 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2235 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2236
2237 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2238 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2239 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
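            /* The low EPTP bits thus encode: bits 2:0 = memory type (6 = write-back),
               bits 5:3 = page-walk length minus one (3 = 4-level walk), bit 6 = A/D enable
               (left clear here); the asserts below double-check this layout. */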
2240
2241 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2242 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2243 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2244 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2245 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2246 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2247 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2248
2249 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2250 AssertRC(rc);
2251#endif
2252
2253 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2254 uint64_t u64GuestCr3 = pCtx->cr3;
2255 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2256 || CPUMIsGuestPagingEnabledEx(pCtx))
2257 {
2258 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2259 if (CPUMIsGuestInPAEModeEx(pCtx))
2260 {
2261 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2262 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2263 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2264 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2265 }
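                /* In PAE mode CR3 points to a table of four PDPTEs, each covering 1 GiB of the
                   linear address space; supplying them via these VMCS fields lets VT-x load them
                   directly on VM-entry instead of fetching them from guest memory. */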
2266
2267 /*
2268 * With nested paging, the guest's view of its CR3 is left unblemished when the
2269 * guest is using paging, or when we have unrestricted guest execution to handle
2270 * the case where the guest is not using paging.
2271 */
2272 }
2273#ifndef IN_NEM_DARWIN
2274 else
2275 {
2276 /*
2277 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2278 * thinks it accesses physical memory directly, we use our identity-mapped
2279 * page table to map guest-linear to guest-physical addresses. EPT takes care
2280 * of translating it to host-physical addresses.
2281 */
2282 RTGCPHYS GCPhys;
2283 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2284
2285 /* We obtain it here every time as the guest could have relocated this PCI region. */
2286 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2287 if (RT_SUCCESS(rc))
2288 { /* likely */ }
2289 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2290 {
2291 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2292 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2293 }
2294 else
2295 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2296
2297 u64GuestCr3 = GCPhys;
2298 }
2299#endif
2300
2301 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2302 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2303 AssertRC(rc);
2304 }
2305 else
2306 {
2307 Assert(!pVmxTransient->fIsNestedGuest);
2308 /* Non-nested paging case, just use the hypervisor's CR3. */
2309 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2310
2311 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2312 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2313 AssertRC(rc);
2314 }
2315
2316 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2317 }
2318
2319 /*
2320 * Guest CR4.
2321 * ASSUMES this is done every time we get in from ring-3! (XCR0)
2322 */
2323 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2324 {
2325 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2326 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2327
2328 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2329 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2330
2331 /*
2332 * With nested-guests, we may have extended the guest/host mask here (since we
2333 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
2334 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2335 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2336 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2337 */
2338 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2339 uint64_t u64GuestCr4 = pCtx->cr4;
2340 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2341 ? pCtx->cr4
2342 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2343 Assert(!RT_HI_U32(u64GuestCr4));
2344
2345#ifndef IN_NEM_DARWIN
2346 /*
2347 * Setup VT-x's view of the guest CR4.
2348 *
2349 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2350 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2351 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2352 *
2353 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2354 */
2355 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2356 {
2357 Assert(pVM->hm.s.vmx.pRealModeTSS);
2358 Assert(PDMVmmDevHeapIsEnabled(pVM));
2359 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2360 }
2361#endif
2362
2363 if (VM_IS_VMX_NESTED_PAGING(pVM))
2364 {
2365 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2366 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2367 {
2368 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2369 u64GuestCr4 |= X86_CR4_PSE;
2370 /* Our identity mapping is a 32-bit page directory. */
2371 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2372 }
2373 /* else use guest CR4. */
2374 }
2375 else
2376 {
2377 Assert(!pVmxTransient->fIsNestedGuest);
2378
2379 /*
2380 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2381 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2382 */
2383 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2384 {
2385 case PGMMODE_REAL: /* Real-mode. */
2386 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2387 case PGMMODE_32_BIT: /* 32-bit paging. */
2388 {
2389 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2390 break;
2391 }
2392
2393 case PGMMODE_PAE: /* PAE paging. */
2394 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2395 {
2396 u64GuestCr4 |= X86_CR4_PAE;
2397 break;
2398 }
2399
2400 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2401 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2402 {
2403#ifdef VBOX_WITH_64_BITS_GUESTS
2404 /* For our assumption in vmxHCShouldSwapEferMsr. */
2405 Assert(u64GuestCr4 & X86_CR4_PAE);
2406 break;
2407#endif
2408 }
2409 default:
2410 AssertFailed();
2411 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2412 }
2413 }
2414
2415 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2416 u64GuestCr4 |= fSetCr4;
2417 u64GuestCr4 &= fZapCr4;
2418
2419 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2420 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2421 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2422
2423#ifndef IN_NEM_DARWIN
2424 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2425 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2426 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2427 {
2428 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2429 hmR0VmxUpdateStartVmFunction(pVCpu);
2430 }
2431#endif
2432
2433 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2434
2435 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2436 }
2437 return rc;
2438}
2439
2440
2441#ifdef VBOX_STRICT
2442/**
2443 * Strict function to validate segment registers.
2444 *
2445 * @param pVCpu The cross context virtual CPU structure.
2446 * @param pVmcsInfo The VMCS info. object.
2447 *
2448 * @remarks Will import guest CR0 on strict builds during validation of
2449 * segments.
2450 */
2451static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2452{
2453 /*
2454 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2455 *
2456 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2457 * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2458 * unusable bit and doesn't change the guest-context value.
2459 */
2460 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2461 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2462 vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2463 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2464 && ( !CPUMIsGuestInRealModeEx(pCtx)
2465 && !CPUMIsGuestInV86ModeEx(pCtx)))
2466 {
2467 /* Protected mode checks */
2468 /* CS */
2469 Assert(pCtx->cs.Attr.n.u1Present);
2470 Assert(!(pCtx->cs.Attr.u & 0xf00));
2471 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2472 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2473 || !(pCtx->cs.Attr.n.u1Granularity));
2474 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2475 || (pCtx->cs.Attr.n.u1Granularity));
2476 /* CS cannot be loaded with NULL in protected mode. */
2477 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2478 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2479 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2480 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2481 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2482 else
2483 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u2Dpl));
2484 /* SS */
2485 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2486 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2487 if ( !(pCtx->cr0 & X86_CR0_PE)
2488 || pCtx->cs.Attr.n.u4Type == 3)
2489 {
2490 Assert(!pCtx->ss.Attr.n.u2Dpl);
2491 }
2492 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2493 {
2494 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2495 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2496 Assert(pCtx->ss.Attr.n.u1Present);
2497 Assert(!(pCtx->ss.Attr.u & 0xf00));
2498 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2499 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2500 || !(pCtx->ss.Attr.n.u1Granularity));
2501 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2502 || (pCtx->ss.Attr.n.u1Granularity));
2503 }
2504 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2505 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2506 {
2507 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2508 Assert(pCtx->ds.Attr.n.u1Present);
2509 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2510 Assert(!(pCtx->ds.Attr.u & 0xf00));
2511 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2512 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2513 || !(pCtx->ds.Attr.n.u1Granularity));
2514 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2515 || (pCtx->ds.Attr.n.u1Granularity));
2516 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2517 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2518 }
2519 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2520 {
2521 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2522 Assert(pCtx->es.Attr.n.u1Present);
2523 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2524 Assert(!(pCtx->es.Attr.u & 0xf00));
2525 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2526 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2527 || !(pCtx->es.Attr.n.u1Granularity));
2528 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2529 || (pCtx->es.Attr.n.u1Granularity));
2530 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2531 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2532 }
2533 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2534 {
2535 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2536 Assert(pCtx->fs.Attr.n.u1Present);
2537 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2538 Assert(!(pCtx->fs.Attr.u & 0xf00));
2539 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2540 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2541 || !(pCtx->fs.Attr.n.u1Granularity));
2542 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2543 || (pCtx->fs.Attr.n.u1Granularity));
2544 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2545 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2546 }
2547 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2548 {
2549 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2550 Assert(pCtx->gs.Attr.n.u1Present);
2551 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2552 Assert(!(pCtx->gs.Attr.u & 0xf00));
2553 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2554 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2555 || !(pCtx->gs.Attr.n.u1Granularity));
2556 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2557 || (pCtx->gs.Attr.n.u1Granularity));
2558 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2559 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2560 }
2561 /* 64-bit capable CPUs. */
2562 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2563 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2564 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2565 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2566 }
2567 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2568 || ( CPUMIsGuestInRealModeEx(pCtx)
2569 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2570 {
2571 /* Real and v86 mode checks. */
2572 /* vmxHCExportGuestSegReg() writes the modified value into the VMCS. We want what we're feeding to VT-x. */
2573 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2574#ifndef IN_NEM_DARWIN
2575 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2576 {
2577 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2578 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2579 }
2580 else
2581#endif
2582 {
2583 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2584 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2585 }
2586
2587 /* CS */
2588 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2589 Assert(pCtx->cs.u32Limit == 0xffff);
2590 AssertMsg(u32CSAttr == 0xf3, ("cs=%#x %#x ", pCtx->cs.Sel, u32CSAttr));
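        /* E.g. a real-mode CS selector of 0x1234 must have base 0x12340 (selector << 4),
           the standard 64 KiB limit and the fixed 0xf3 access rights checked above. */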
2591 /* SS */
2592 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2593 Assert(pCtx->ss.u32Limit == 0xffff);
2594 Assert(u32SSAttr == 0xf3);
2595 /* DS */
2596 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2597 Assert(pCtx->ds.u32Limit == 0xffff);
2598 Assert(u32DSAttr == 0xf3);
2599 /* ES */
2600 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2601 Assert(pCtx->es.u32Limit == 0xffff);
2602 Assert(u32ESAttr == 0xf3);
2603 /* FS */
2604 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2605 Assert(pCtx->fs.u32Limit == 0xffff);
2606 Assert(u32FSAttr == 0xf3);
2607 /* GS */
2608 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2609 Assert(pCtx->gs.u32Limit == 0xffff);
2610 Assert(u32GSAttr == 0xf3);
2611 /* 64-bit capable CPUs. */
2612 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2613 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2614 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2615 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2616 }
2617}
2618#endif /* VBOX_STRICT */
2619
2620
2621/**
2622 * Exports a guest segment register into the guest-state area in the VMCS.
2623 *
2624 * @returns VBox status code.
2625 * @param pVCpu The cross context virtual CPU structure.
2626 * @param pVmcsInfo The VMCS info. object.
2627 * @param iSegReg The segment register number (X86_SREG_XXX).
2628 * @param pSelReg Pointer to the segment selector.
2629 *
2630 * @remarks No-long-jump zone!!!
2631 */
2632static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2633{
2634 Assert(iSegReg < X86_SREG_COUNT);
2635
2636 uint32_t u32Access = pSelReg->Attr.u;
2637#ifndef IN_NEM_DARWIN
2638 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2639#endif
2640 {
2641 /*
2642 * The way to differentiate between whether this is really a null selector or was just
2643 * a selector loaded with 0 in real-mode is using the segment attributes. A selector
2644 * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
2645 * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensures
2646 * NULL selectors loaded in protected-mode have their attribute as 0.
2647 */
2648 if (u32Access)
2649 { }
2650 else
2651 u32Access = X86DESCATTR_UNUSABLE;
2652 }
2653#ifndef IN_NEM_DARWIN
2654 else
2655 {
2656 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2657 u32Access = 0xf3;
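        /* 0xf3 decodes as: type 3 (read/write, accessed data), S=1, DPL=3, present -
           the access rights a v86-mode segment is expected to have. */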
2658 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2659 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2660 RT_NOREF_PV(pVCpu);
2661 }
2662#else
2663 RT_NOREF(pVmcsInfo);
2664#endif
2665
2666 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2667 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2668 ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg, pSelReg->Attr.u));
2669
2670 /*
2671 * Commit it to the VMCS.
2672 */
2673 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2674 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2675 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2676 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2677 return VINF_SUCCESS;
2678}
2679
2680
2681/**
2682 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2683 * area in the VMCS.
2684 *
2685 * @returns VBox status code.
2686 * @param pVCpu The cross context virtual CPU structure.
2687 * @param pVmxTransient The VMX-transient structure.
2688 *
2689 * @remarks Will import guest CR0 on strict builds during validation of
2690 * segments.
2691 * @remarks No-long-jump zone!!!
2692 */
2693static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2694{
2695 int rc = VERR_INTERNAL_ERROR_5;
2696#ifndef IN_NEM_DARWIN
2697 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2698#endif
2699 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2700 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2701#ifndef IN_NEM_DARWIN
2702 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2703#endif
2704
2705 /*
2706 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2707 */
2708 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2709 {
2710 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2711 {
2712 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2713#ifndef IN_NEM_DARWIN
2714 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2715 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2716#endif
2717 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2718 AssertRC(rc);
2719 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2720 }
2721
2722 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2723 {
2724 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2725#ifndef IN_NEM_DARWIN
2726 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2727 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2728#endif
2729 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2730 AssertRC(rc);
2731 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2732 }
2733
2734 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2735 {
2736 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2737#ifndef IN_NEM_DARWIN
2738 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2739 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2740#endif
2741 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2742 AssertRC(rc);
2743 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2744 }
2745
2746 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2747 {
2748 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2749#ifndef IN_NEM_DARWIN
2750 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2751 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2752#endif
2753 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2754 AssertRC(rc);
2755 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2756 }
2757
2758 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2759 {
2760 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2761#ifndef IN_NEM_DARWIN
2762 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2763 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2764#endif
2765 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2766 AssertRC(rc);
2767 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2768 }
2769
2770 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2771 {
2772 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2773#ifndef IN_NEM_DARWIN
2774 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2775 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2776#endif
2777 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2778 AssertRC(rc);
2779 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2780 }
2781
2782#ifdef VBOX_STRICT
2783 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2784#endif
2785 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2786 pCtx->cs.Attr.u));
2787 }
2788
2789 /*
2790 * Guest TR.
2791 */
2792 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2793 {
2794 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2795
2796 /*
2797 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2798 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2799 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2800 */
2801 uint16_t u16Sel;
2802 uint32_t u32Limit;
2803 uint64_t u64Base;
2804 uint32_t u32AccessRights;
2805#ifndef IN_NEM_DARWIN
2806 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2807#endif
2808 {
2809 u16Sel = pCtx->tr.Sel;
2810 u32Limit = pCtx->tr.u32Limit;
2811 u64Base = pCtx->tr.u64Base;
2812 u32AccessRights = pCtx->tr.Attr.u;
2813 }
2814#ifndef IN_NEM_DARWIN
2815 else
2816 {
2817 Assert(!pVmxTransient->fIsNestedGuest);
2818 Assert(pVM->hm.s.vmx.pRealModeTSS);
2819 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2820
2821 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2822 RTGCPHYS GCPhys;
2823 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2824 AssertRCReturn(rc, rc);
2825
2826 X86DESCATTR DescAttr;
2827 DescAttr.u = 0;
2828 DescAttr.n.u1Present = 1;
2829 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2830
2831 u16Sel = 0;
2832 u32Limit = HM_VTX_TSS_SIZE;
2833 u64Base = GCPhys;
2834 u32AccessRights = DescAttr.u;
2835 }
2836#endif
2837
2838 /* Validate. */
2839 Assert(!(u16Sel & RT_BIT(2)));
2840 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2841 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2842 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2843 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ. */
2844 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1. */
2845 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2846 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2847 Assert( (u32Limit & 0xfff) == 0xfff
2848 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2849 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2850 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2851
2852 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2853 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2854 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2855 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2856
2857 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2858 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2859 }
2860
2861 /*
2862 * Guest GDTR.
2863 */
2864 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2865 {
2866 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2867
2868 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2869 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2870
2871 /* Validate. */
2872 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2873
2874 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2875 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2876 }
2877
2878 /*
2879 * Guest LDTR.
2880 */
2881 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2882 {
2883 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2884
2885 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2886 uint32_t u32Access;
2887 if ( !pVmxTransient->fIsNestedGuest
2888 && !pCtx->ldtr.Attr.u)
2889 u32Access = X86DESCATTR_UNUSABLE;
2890 else
2891 u32Access = pCtx->ldtr.Attr.u;
2892
2893 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2894 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2895 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2896 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2897
2898 /* Validate. */
2899 if (!(u32Access & X86DESCATTR_UNUSABLE))
2900 {
2901 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2902 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2903 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2904 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2905 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2906 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2907 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2908 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2909 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2910 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2911 }
2912
2913 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2914 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2915 }
2916
2917 /*
2918 * Guest IDTR.
2919 */
2920 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2921 {
2922 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2923
2924 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2925 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2926
2927 /* Validate. */
2928 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2929
2930 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2931 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2932 }
2933
2934 return VINF_SUCCESS;
2935}
2936
2937
2938/**
2939 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2940 * VM-exit interruption info type.
2941 *
2942 * @returns The IEM exception flags.
2943 * @param uVector The event vector.
2944 * @param uVmxEventType The VMX event type.
2945 *
2946 * @remarks This function currently only constructs flags required for
2947 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g, error-code
2948 * and CR2 aspects of an exception are not included).
2949 */
2950static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2951{
2952 uint32_t fIemXcptFlags;
2953 switch (uVmxEventType)
2954 {
2955 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2956 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2957 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2958 break;
2959
2960 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2961 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2962 break;
2963
2964 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2965 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2966 break;
2967
2968 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2969 {
2970 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2971 if (uVector == X86_XCPT_BP)
2972 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2973 else if (uVector == X86_XCPT_OF)
2974 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
2975 else
2976 {
2977 fIemXcptFlags = 0;
2978 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
2979 }
2980 break;
2981 }
2982
2983 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
2984 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2985 break;
2986
2987 default:
2988 fIemXcptFlags = 0;
2989 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
2990 break;
2991 }
2992 return fIemXcptFlags;
2993}
2994
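/*
 * Usage sketch (illustrative only, not part of the build): the flags for the original
 * IDT-vectoring event and for the new VM-exit event are computed separately and then
 * fed to IEMEvaluateRecursiveXcpt; the vectors below are merely examples.
 *
 *     uint32_t const fPrevFlags = vmxHCGetIemXcptFlags(X86_XCPT_BP, VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT);
 *     //  -> IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR
 *     uint32_t const fCurFlags  = vmxHCGetIemXcptFlags(X86_XCPT_GP, VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
 *     //  -> IEM_XCPT_FLAGS_T_CPU_XCPT
 */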
2995
2996/**
2997 * Sets an event as a pending event to be injected into the guest.
2998 *
2999 * @param pVCpu The cross context virtual CPU structure.
3000 * @param u32IntInfo The VM-entry interruption-information field.
3001 * @param cbInstr The VM-entry instruction length in bytes (for
3002 * software interrupts, exceptions and privileged
3003 * software exceptions).
3004 * @param u32ErrCode The VM-entry exception error code.
3005 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3006 * page-fault.
3007 */
3008DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3009 RTGCUINTPTR GCPtrFaultAddress)
3010{
3011 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3012 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3013 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3014 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3015 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3016 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3017}
3018
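/*
 * Illustrative sketch (not part of the build): a page-fault (#PF) with an error code and
 * fault address would be queued the same way the helpers below build their events; the
 * uErrCode and uFaultAddr variables are placeholders for values taken from the VM-exit.
 *
 *     uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_PF)
 *                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
 *                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
 *                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
 *     vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0, uErrCode, uFaultAddr);
 */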
3019
3020/**
3021 * Sets an external interrupt as pending-for-injection into the VM.
3022 *
3023 * @param pVCpu The cross context virtual CPU structure.
3024 * @param u8Interrupt The external interrupt vector.
3025 */
3026DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3027{
3028 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3029 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3030 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3031 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3032 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3033}
3034
3035
3036/**
3037 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3038 *
3039 * @param pVCpu The cross context virtual CPU structure.
3040 */
3041DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3042{
3043 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3044 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3045 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3046 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3047 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3048}
3049
3050
3051/**
3052 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3053 *
3054 * @param pVCpu The cross context virtual CPU structure.
3055 */
3056DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3057{
3058 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3059 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3060 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3061 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3062 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3063}
3064
3065
3066/**
3067 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3068 *
3069 * @param pVCpu The cross context virtual CPU structure.
3070 */
3071DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3072{
3073 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3074 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3075 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3076 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3077 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3078}
3079
3080
3081/**
3082 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3083 *
3084 * @param pVCpu The cross context virtual CPU structure.
3085 */
3086DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3087{
3088 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3089 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3090 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3091 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3092 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3093}
3094
3095
3096#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3097/**
3098 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3099 *
3100 * @param pVCpu The cross context virtual CPU structure.
3101 * @param u32ErrCode The error code for the general-protection exception.
3102 */
3103DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3104{
3105 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3106 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3107 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3108 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3109 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3110}
3111
3112
3113/**
3114 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3115 *
3116 * @param pVCpu The cross context virtual CPU structure.
3117 * @param u32ErrCode The error code for the stack exception.
3118 */
3119DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3120{
3121 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3122 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3123 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3124 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3125 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3126}
3127#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3128
3129
3130/**
3131 * Fixes up attributes for the specified segment register.
3132 *
3133 * @param pVCpu The cross context virtual CPU structure.
3134 * @param pSelReg The segment register that needs fixing.
3135 * @param pszRegName The register name (for logging and assertions).
3136 */
3137static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3138{
3139 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3140
3141 /*
3142 * If VT-x marks the segment as unusable, most other bits remain undefined:
3143 * - For CS the L, D and G bits have meaning.
3144 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3145 * - For the remaining data segments no bits are defined.
3146 *
3147 * The present bit and the unusable bit have been observed to be set at the
3148 * same time (the selector was supposed to be invalid as we started executing
3149 * a V8086 interrupt in ring-0).
3150 *
3151 * What is important for the rest of the VBox code is that the P bit is
3152 * cleared. Some of the other VBox code recognizes the unusable bit, but
3153 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3154 * safe side here, we'll strip off P and other bits we don't care about. If
3155 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3156 *
3157 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3158 */
3159#ifdef VBOX_STRICT
3160 uint32_t const uAttr = pSelReg->Attr.u;
3161#endif
3162
3163 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3164 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3165 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3166
3167#ifdef VBOX_STRICT
3168# ifndef IN_NEM_DARWIN
3169 VMMRZCallRing3Disable(pVCpu);
3170# endif
3171 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3172# ifdef DEBUG_bird
3173 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3174 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3175 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3176# endif
3177# ifndef IN_NEM_DARWIN
3178 VMMRZCallRing3Enable(pVCpu);
3179# endif
3180 NOREF(uAttr);
3181#endif
3182 RT_NOREF2(pVCpu, pszRegName);
3183}
3184
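/*
 * Worked example (illustrative only): an unusable segment that VT-x left with the present
 * bit set, say Attr.u == X86DESCATTR_UNUSABLE | X86DESCATTR_P | X86DESCATTR_G
 * | X86DESCATTR_DPL | X86DESCATTR_TYPE, comes out of the masking above as
 * X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_DPL | X86DESCATTR_TYPE, i.e. only
 * the P bit (and, had they been set, the limit-high and AVL bits) is stripped.
 */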
3185
3186/**
3187 * Imports a guest segment register from the current VMCS into the guest-CPU
3188 * context.
3189 *
3190 * @param pVCpu The cross context virtual CPU structure.
3191 * @tparam a_iSegReg The segment register number (X86_SREG_XXX).
3192 *
3193 * @remarks Called with interrupts and/or preemption disabled.
3194 */
3195template<uint32_t const a_iSegReg>
3196DECLINLINE(void) vmxHCImportGuestSegReg(PVMCPUCC pVCpu)
3197{
3198 AssertCompile(a_iSegReg < X86_SREG_COUNT);
3199 /* Check that the macros we depend upon here and in the exporting parent function work: */
3200#define MY_SEG_VMCS_FIELD(a_FieldPrefix, a_FieldSuff) \
3201 ( a_iSegReg == X86_SREG_ES ? a_FieldPrefix ## ES ## a_FieldSuff \
3202 : a_iSegReg == X86_SREG_CS ? a_FieldPrefix ## CS ## a_FieldSuff \
3203 : a_iSegReg == X86_SREG_SS ? a_FieldPrefix ## SS ## a_FieldSuff \
3204 : a_iSegReg == X86_SREG_DS ? a_FieldPrefix ## DS ## a_FieldSuff \
3205 : a_iSegReg == X86_SREG_FS ? a_FieldPrefix ## FS ## a_FieldSuff \
3206 : a_iSegReg == X86_SREG_GS ? a_FieldPrefix ## GS ## a_FieldSuff : 0)
3207 AssertCompile(VMX_VMCS_GUEST_SEG_BASE(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS_GUEST_,_BASE));
3208 AssertCompile(VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS16_GUEST_,_SEL));
3209 AssertCompile(VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_LIMIT));
3210 AssertCompile(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_ACCESS_RIGHTS));
3211
3212 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[a_iSegReg];
3213
3214 uint16_t u16Sel;
3215 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg), &u16Sel); AssertRC(rc);
3216 pSelReg->Sel = u16Sel;
3217 pSelReg->ValidSel = u16Sel;
3218
3219 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3220 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(a_iSegReg), &pSelReg->u64Base); AssertRC(rc);
3221
3222 uint32_t u32Attr;
3223 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg), &u32Attr); AssertRC(rc);
3224 pSelReg->Attr.u = u32Attr;
3225 if (u32Attr & X86DESCATTR_UNUSABLE)
3226 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + a_iSegReg * 3);
3227
3228 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3229}
3230
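/*
 * Usage sketch (illustrative only): the template is instantiated once per segment
 * register, e.g. vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu). The "ES\0CS\0SS\0DS\0FS\0GS"
 * expression above works because each name takes exactly three bytes (two characters
 * plus the terminator), so a_iSegReg * 3 indexes the matching name for logging.
 */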
3231
3232/**
3233 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3234 *
3235 * @param pVCpu The cross context virtual CPU structure.
3236 *
3237 * @remarks Called with interrupts and/or preemption disabled.
3238 */
3239DECLINLINE(void) vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3240{
3241 uint16_t u16Sel;
3242 uint64_t u64Base;
3243 uint32_t u32Limit, u32Attr;
3244 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3245 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3246 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3247 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3248
3249 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3250 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3251 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3252 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3253 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3254 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3255 if (u32Attr & X86DESCATTR_UNUSABLE)
3256 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3257}
3258
3259
3260/**
3261 * Imports the guest TR from the current VMCS into the guest-CPU context.
3262 *
3263 * @param pVCpu The cross context virtual CPU structure.
3264 *
3265 * @remarks Called with interrupts and/or preemption disabled.
3266 */
3267DECLINLINE(void) vmxHCImportGuestTr(PVMCPUCC pVCpu)
3268{
3269 uint16_t u16Sel;
3270 uint64_t u64Base;
3271 uint32_t u32Limit, u32Attr;
3272 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3273 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3274 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3275 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3276
3277 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3278 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3279 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3280 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3281 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3282 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3283 /* TR is the only selector that can never be unusable. */
3284 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3285}
3286
3287
3288/**
3289 * Core: Imports the guest RIP from the VMCS back into the guest-CPU context.
3290 *
3291 * @returns The RIP value.
3292 * @param pVCpu The cross context virtual CPU structure.
3293 *
3294 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3295 * @remarks Do -not- call this function directly!
3296 */
3297DECL_FORCE_INLINE(uint64_t) vmxHCImportGuestCoreRip(PVMCPUCC pVCpu)
3298{
3299 uint64_t u64Val;
3300 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3301 AssertRC(rc);
3302
3303 pVCpu->cpum.GstCtx.rip = u64Val;
3304
3305 return u64Val;
3306}
3307
3308
3309/**
3310 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3311 *
3312 * @param pVCpu The cross context virtual CPU structure.
3313 *
3314 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3315 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3316 * instead!!!
3317 */
3318DECLINLINE(void) vmxHCImportGuestRip(PVMCPUCC pVCpu)
3319{
3320 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
3321 {
3322 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3323 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3324 }
3325}
3326
3327
3328/**
3329 * Core: Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3330 *
3331 * @param pVCpu The cross context virtual CPU structure.
3332 * @param pVmcsInfo The VMCS info. object.
3333 *
3334 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3335 * @remarks Do -not- call this function directly!
3336 */
3337DECL_FORCE_INLINE(void) vmxHCImportGuestCoreRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3338{
3339 uint64_t fRFlags;
3340 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &fRFlags);
3341 AssertRC(rc);
3342
3343 Assert((fRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
3344 Assert((fRFlags & ~(uint64_t)(X86_EFL_1 | X86_EFL_LIVE_MASK)) == 0);
3345
3346 pVCpu->cpum.GstCtx.rflags.u = fRFlags;
3347#ifndef IN_NEM_DARWIN
3348 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3349 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
3350 { /* most likely */ }
3351 else
3352 {
3353 pVCpu->cpum.GstCtx.eflags.Bits.u1VM = 0;
3354 pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3355 }
3356#else
3357 RT_NOREF(pVmcsInfo);
3358#endif
3359}
3360
3361
3362/**
3363 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3364 *
3365 * @param pVCpu The cross context virtual CPU structure.
3366 * @param pVmcsInfo The VMCS info. object.
3367 *
3368 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3369 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3370 * instead!!!
3371 */
3372DECLINLINE(void) vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3373{
3374 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RFLAGS)
3375 {
3376 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3377 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3378 }
3379}
3380
3381
3382/**
3383 * Worker for vmxHCImportGuestIntrState that handles the case where any of the
3384 * relevant VMX_VMCS32_GUEST_INT_STATE bits are set.
3385 */
3386DECL_NO_INLINE(static,void) vmxHCImportGuestIntrStateSlow(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fGstIntState)
3387{
3388 /*
3389 * We must import RIP here to set our EM interrupt-inhibited state.
3390 * We also import RFLAGS as our code that evaluates pending interrupts
3391 * before VM-entry requires it.
3392 */
3393 vmxHCImportGuestRip(pVCpu);
3394 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3395
3396 CPUMUpdateInterruptShadowSsStiEx(&pVCpu->cpum.GstCtx,
3397 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
3398 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
3399 pVCpu->cpum.GstCtx.rip);
3400 CPUMUpdateInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx, RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
3401}
3402
3403
3404/**
3405 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3406 * context.
3407 *
3408 * @note May import RIP and RFLAGS if interrupt or NMI are blocked.
3409 *
3410 * @param pVCpu The cross context virtual CPU structure.
3411 * @param pVmcsInfo The VMCS info. object.
3412 *
3413 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3414 * do not log!
3415 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3416 * instead!!!
3417 */
3418DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3419{
3420 uint32_t u32Val;
3421 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3422 if (!u32Val)
3423 {
3424 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
3425 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
3426 }
3427 else
3428 vmxHCImportGuestIntrStateSlow(pVCpu, pVmcsInfo, u32Val);
3429}
3430
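/*
 * Illustrative note: after e.g. a guest STI that is immediately followed by a VM-exit,
 * VMX_VMCS_GUEST_INT_STATE_BLOCK_STI is set in the interruptibility-state, so the slow
 * worker above records an interrupt shadow at the freshly imported RIP; a zero
 * interruptibility-state simply clears any previously recorded shadow and NMI blocking.
 */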
3431
3432/**
3433 * Worker for VMXR0ImportStateOnDemand.
3434 *
3435 * @returns VBox status code.
3436 * @param pVCpu The cross context virtual CPU structure.
3437 * @param pVmcsInfo The VMCS info. object.
3438 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3439 */
3440static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3441{
3442 int rc = VINF_SUCCESS;
3443 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3444 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3445 uint32_t u32Val;
3446
3447 /*
3448 * Note! This is a hack to work around a mysterious BSOD observed with release builds
3449 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3450 * neither are other host platforms.
3451 *
3452 * Committing this temporarily as it prevents BSOD.
3453 *
3454 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3455 */
3456#ifdef RT_OS_WINDOWS
3457 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3458 return VERR_HM_IPE_1;
3459#endif
3460
3461 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3462
3463#ifndef IN_NEM_DARWIN
3464 /*
3465 * We disable interrupts to make the updating of the state and in particular
3466 * the fExtrn modification atomic wrt preemption hooks.
3467 */
3468 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3469#endif
3470
3471 fWhat &= pCtx->fExtrn;
3472 if (fWhat)
3473 {
3474 do
3475 {
3476 if (fWhat & CPUMCTX_EXTRN_RIP)
3477 vmxHCImportGuestRip(pVCpu);
3478
3479 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3480 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3481
3482 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3483 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3484 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3485
3486 if (fWhat & CPUMCTX_EXTRN_RSP)
3487 {
3488 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3489 AssertRC(rc);
3490 }
3491
3492 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3493 {
3494 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3495#ifndef IN_NEM_DARWIN
3496 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3497#else
3498 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3499#endif
3500 if (fWhat & CPUMCTX_EXTRN_CS)
3501 {
3502 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3503 vmxHCImportGuestRip(pVCpu); /** @todo WTF? */
3504 if (fRealOnV86Active)
3505 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3506 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3507 }
3508 if (fWhat & CPUMCTX_EXTRN_SS)
3509 {
3510 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3511 if (fRealOnV86Active)
3512 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3513 }
3514 if (fWhat & CPUMCTX_EXTRN_DS)
3515 {
3516 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3517 if (fRealOnV86Active)
3518 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3519 }
3520 if (fWhat & CPUMCTX_EXTRN_ES)
3521 {
3522 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3523 if (fRealOnV86Active)
3524 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3525 }
3526 if (fWhat & CPUMCTX_EXTRN_FS)
3527 {
3528 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3529 if (fRealOnV86Active)
3530 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3531 }
3532 if (fWhat & CPUMCTX_EXTRN_GS)
3533 {
3534 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3535 if (fRealOnV86Active)
3536 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3537 }
3538 }
3539
3540 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3541 {
3542 if (fWhat & CPUMCTX_EXTRN_LDTR)
3543 vmxHCImportGuestLdtr(pVCpu);
3544
3545 if (fWhat & CPUMCTX_EXTRN_GDTR)
3546 {
3547 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3548 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3549 pCtx->gdtr.cbGdt = u32Val;
3550 }
3551
3552 /* Guest IDTR. */
3553 if (fWhat & CPUMCTX_EXTRN_IDTR)
3554 {
3555 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3556 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3557 pCtx->idtr.cbIdt = u32Val;
3558 }
3559
3560 /* Guest TR. */
3561 if (fWhat & CPUMCTX_EXTRN_TR)
3562 {
3563#ifndef IN_NEM_DARWIN
3564 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3565 don't need to import that one. */
3566 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3567#endif
3568 vmxHCImportGuestTr(pVCpu);
3569 }
3570 }
3571
3572 if (fWhat & CPUMCTX_EXTRN_DR7)
3573 {
3574#ifndef IN_NEM_DARWIN
3575 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3576#endif
3577 {
3578 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3579 AssertRC(rc);
3580 }
3581 }
3582
3583 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3584 {
3585 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3586 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3587 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3588 pCtx->SysEnter.cs = u32Val;
3589 }
3590
3591#ifndef IN_NEM_DARWIN
3592 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3593 {
3594 if ( pVM->hmr0.s.fAllow64BitGuests
3595 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3596 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3597 }
3598
3599 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3600 {
3601 if ( pVM->hmr0.s.fAllow64BitGuests
3602 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3603 {
3604 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3605 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3606 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3607 }
3608 }
3609
3610 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3611 {
3612 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3613 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3614 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3615 Assert(pMsrs);
3616 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3617 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3618 for (uint32_t i = 0; i < cMsrs; i++)
3619 {
3620 uint32_t const idMsr = pMsrs[i].u32Msr;
3621 switch (idMsr)
3622 {
3623 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3624 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3625 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3626 default:
3627 {
3628 uint32_t idxLbrMsr;
3629 if (VM_IS_VMX_LBR(pVM))
3630 {
3631 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3632 {
3633 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3634 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3635 break;
3636 }
3637 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3638 {
3639 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3640 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3641 break;
3642 }
3643 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3644 {
3645 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3646 break;
3647 }
3648 /* Fallthru (no break) */
3649 }
3650 pCtx->fExtrn = 0;
3651 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3652 ASMSetFlags(fEFlags);
3653 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3654 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3655 }
3656 }
3657 }
3658 }
3659#endif
3660
3661 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3662 {
3663 if (fWhat & CPUMCTX_EXTRN_CR0)
3664 {
3665 uint64_t u64Cr0;
3666 uint64_t u64Shadow;
3667 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3668 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3669#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3670 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3671 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3672#else
3673 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3674 {
3675 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3676 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3677 }
3678 else
3679 {
3680 /*
3681 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3682 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3683 * re-construct CR0. See @bugref{9180#c95} for details.
3684 */
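 /* Roughly (see the bugref above for the authoritative description): bits in neither
 guest/host mask are guest-owned and come straight from the hardware VMCS value; bits
 intercepted by the nested hypervisor take the value it put in its (virtual) VMCS
 guest CR0 field; bits only we intercept come from the read shadow set up for the
 nested-guest. */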
3685 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3686 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3687 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3688 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3689 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3690 }
3691#endif
3692#ifndef IN_NEM_DARWIN
3693 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3694#endif
3695 CPUMSetGuestCR0(pVCpu, u64Cr0);
3696#ifndef IN_NEM_DARWIN
3697 VMMRZCallRing3Enable(pVCpu);
3698#endif
3699 }
3700
3701 if (fWhat & CPUMCTX_EXTRN_CR4)
3702 {
3703 uint64_t u64Cr4;
3704 uint64_t u64Shadow;
3705 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3706 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3707#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3708 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3709 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3710#else
3711 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3712 {
3713 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3714 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3715 }
3716 else
3717 {
3718 /*
3719 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3720 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3721 * re-construct CR4. See @bugref{9180#c95} for details.
3722 */
3723 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3724 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3725 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3726 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3727 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3728 }
3729#endif
3730 pCtx->cr4 = u64Cr4;
3731 }
3732
3733 if (fWhat & CPUMCTX_EXTRN_CR3)
3734 {
3735 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3736 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3737 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3738 && CPUMIsGuestPagingEnabledEx(pCtx)))
3739 {
3740 uint64_t u64Cr3;
3741 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3742 if (pCtx->cr3 != u64Cr3)
3743 {
3744 pCtx->cr3 = u64Cr3;
3745 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3746 }
3747
3748 /*
3749 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3750 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3751 */
3752 if (CPUMIsGuestInPAEModeEx(pCtx))
3753 {
3754 X86PDPE aPaePdpes[4];
3755 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3756 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3757 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3758 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3759 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3760 {
3761 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3762 /* PGM now updates PAE PDPTEs while updating CR3. */
3763 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3764 }
3765 }
3766 }
3767 }
3768 }
3769
3770#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3771 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3772 {
3773 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3774 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3775 {
3776 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3777 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3778 if (RT_SUCCESS(rc))
3779 { /* likely */ }
3780 else
3781 break;
3782 }
3783 }
3784#endif
3785 } while (0);
3786
3787 if (RT_SUCCESS(rc))
3788 {
3789 /* Update fExtrn. */
3790 pCtx->fExtrn &= ~fWhat;
3791
3792 /* If everything has been imported, clear the HM keeper bit. */
3793 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3794 {
3795#ifndef IN_NEM_DARWIN
3796 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3797#else
3798 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3799#endif
3800 Assert(!pCtx->fExtrn);
3801 }
3802 }
3803 }
3804#ifndef IN_NEM_DARWIN
3805 else
3806 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3807
3808 /*
3809 * Restore interrupts.
3810 */
3811 ASMSetFlags(fEFlags);
3812#endif
3813
3814 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3815
3816 if (RT_SUCCESS(rc))
3817 { /* likely */ }
3818 else
3819 return rc;
3820
3821 /*
3822 * Honor any pending CR3 updates.
3823 *
3824 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3825 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3826 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3827 *
3828 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3829 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3830 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3831 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3832 *
3833 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3834 *
3835 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3836 */
3837 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3838#ifndef IN_NEM_DARWIN
3839 && VMMRZCallRing3IsEnabled(pVCpu)
3840#endif
3841 )
3842 {
3843 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3844 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3845 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3846 }
3847
3848 return VINF_SUCCESS;
3849}
3850
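/*
 * Usage sketch (illustrative only): this worker only touches VMCS fields whose
 * CPUMCTX_EXTRN_XXX bits are still set in pVCpu->cpum.GstCtx.fExtrn, so a call like
 *     vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3);
 * is a no-op for anything already imported and clears the requested bits on success.
 */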
3851
3852/**
3853 * Internal state fetcher, inner version where we fetch all of a_fWhat.
3854 *
3855 * @returns VBox status code.
3856 * @param pVCpu The cross context virtual CPU structure.
3857 * @param pVmcsInfo The VMCS info. object.
3858 * @param fEFlags Saved EFLAGS for restoring the interrupt flag. Ignored
3859 * in NEM/darwin context.
3860 * @tparam a_fWhat What to import, zero or more bits from
3861 * HMVMX_CPUMCTX_EXTRN_ALL.
3862 */
3863template<uint64_t const a_fWhat>
3864static int vmxHCImportGuestStateInner(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3865{
3866 Assert(a_fWhat != 0); /* No AssertCompile as the assertion probably kicks in before the compiler (clang) discards it. */
3867 AssertCompile(!(a_fWhat & ~HMVMX_CPUMCTX_EXTRN_ALL));
3868 Assert( (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == a_fWhat
3869 || (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == (a_fWhat & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
3870
3871 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3872
3873 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3874
3875 /* RIP and RFLAGS may have been imported already by the post exit code
3876 together with the CPUMCTX_EXTRN_INHIBIT_INT/NMI state, in which case
3877 this part of the code is skipped. */
3878 if ( (a_fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3879 && pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3880 {
3881 if (a_fWhat & CPUMCTX_EXTRN_RFLAGS)
3882 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3883
3884 if (a_fWhat & CPUMCTX_EXTRN_RIP)
3885 {
3886 if (!(a_fWhat & CPUMCTX_EXTRN_CS))
3887 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3888 else
3889 vmxHCImportGuestCoreRip(pVCpu);
3890 }
3891 }
3892
3893 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3894 if (a_fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3895 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3896
3897 if (a_fWhat & (CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TR))
3898 {
3899 if (a_fWhat & CPUMCTX_EXTRN_CS)
3900 {
3901 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3902 /** @todo try to get rid of this crap, it smells and is probably never ever
3903 * used: */
3904 if ( !(a_fWhat & CPUMCTX_EXTRN_RIP)
3905 && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP))
3906 {
3907 vmxHCImportGuestCoreRip(pVCpu);
3908 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3909 }
3910 EMHistoryUpdatePC(pVCpu, pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, true /* fFlattened */);
3911 }
3912 if (a_fWhat & CPUMCTX_EXTRN_SS)
3913 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3914 if (a_fWhat & CPUMCTX_EXTRN_DS)
3915 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3916 if (a_fWhat & CPUMCTX_EXTRN_ES)
3917 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3918 if (a_fWhat & CPUMCTX_EXTRN_FS)
3919 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3920 if (a_fWhat & CPUMCTX_EXTRN_GS)
3921 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3922
3923 /* Guest TR.
3924 Real-mode emulation using virtual-8086 mode has the fake TSS
3925 (pRealModeTSS) in TR, don't need to import that one. */
3926#ifndef IN_NEM_DARWIN
3927 PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmcsInfo->pShared;
3928 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3929 if ((a_fWhat & CPUMCTX_EXTRN_TR) && !fRealOnV86Active)
3930#else
3931 if (a_fWhat & CPUMCTX_EXTRN_TR)
3932#endif
3933 vmxHCImportGuestTr(pVCpu);
3934
3935#ifndef IN_NEM_DARWIN /* NEM/Darwin: HV supports only unrestricted guest execution. */
3936 if (fRealOnV86Active)
3937 {
3938 if (a_fWhat & CPUMCTX_EXTRN_CS)
3939 pVCpu->cpum.GstCtx.cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3940 if (a_fWhat & CPUMCTX_EXTRN_SS)
3941 pVCpu->cpum.GstCtx.ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3942 if (a_fWhat & CPUMCTX_EXTRN_DS)
3943 pVCpu->cpum.GstCtx.ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3944 if (a_fWhat & CPUMCTX_EXTRN_ES)
3945 pVCpu->cpum.GstCtx.es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3946 if (a_fWhat & CPUMCTX_EXTRN_FS)
3947 pVCpu->cpum.GstCtx.fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3948 if (a_fWhat & CPUMCTX_EXTRN_GS)
3949 pVCpu->cpum.GstCtx.gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3950 }
3951#endif
3952 }
3953
3954 if (a_fWhat & CPUMCTX_EXTRN_RSP)
3955 {
3956 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pVCpu->cpum.GstCtx.rsp);
3957 AssertRC(rc);
3958 }
3959
3960 if (a_fWhat & CPUMCTX_EXTRN_LDTR)
3961 vmxHCImportGuestLdtr(pVCpu);
3962
3963 if (a_fWhat & CPUMCTX_EXTRN_GDTR)
3964 {
3965 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pVCpu->cpum.GstCtx.gdtr.pGdt); AssertRC(rc1);
3966 uint32_t u32Val;
3967 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc2);
3968 pVCpu->cpum.GstCtx.gdtr.cbGdt = (uint16_t)u32Val;
3969 }
3970
3971 /* Guest IDTR. */
3972 if (a_fWhat & CPUMCTX_EXTRN_IDTR)
3973 {
3974 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pVCpu->cpum.GstCtx.idtr.pIdt); AssertRC(rc1);
3975 uint32_t u32Val;
3976 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc2);
3977 pVCpu->cpum.GstCtx.idtr.cbIdt = (uint64_t)u32Val;
3978 }
3979
3980 if (a_fWhat & CPUMCTX_EXTRN_DR7)
3981 {
3982#ifndef IN_NEM_DARWIN
3983 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3984#endif
3985 {
3986 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pVCpu->cpum.GstCtx.dr[7]);
3987 AssertRC(rc);
3988 }
3989 }
3990
3991 if (a_fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3992 {
3993 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pVCpu->cpum.GstCtx.SysEnter.eip); AssertRC(rc1);
3994 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pVCpu->cpum.GstCtx.SysEnter.esp); AssertRC(rc2);
3995 uint32_t u32Val;
3996 int const rc3 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc3);
3997 pVCpu->cpum.GstCtx.SysEnter.cs = u32Val;
3998 }
3999
4000#ifndef IN_NEM_DARWIN
4001 if (a_fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
4002 {
4003 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4004 && pVM->hmr0.s.fAllow64BitGuests)
4005 pVCpu->cpum.GstCtx.msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
4006 }
4007
4008 if (a_fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
4009 {
4010 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4011 && pVM->hmr0.s.fAllow64BitGuests)
4012 {
4013 pVCpu->cpum.GstCtx.msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
4014 pVCpu->cpum.GstCtx.msrSTAR = ASMRdMsr(MSR_K6_STAR);
4015 pVCpu->cpum.GstCtx.msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
4016 }
4017 }
4018
4019 if (a_fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
4020 {
4021 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
4022 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
4023 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
4024 Assert(pMsrs);
4025 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
4026 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
4027 for (uint32_t i = 0; i < cMsrs; i++)
4028 {
4029 uint32_t const idMsr = pMsrs[i].u32Msr;
4030 switch (idMsr)
4031 {
4032 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
4033 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
4034 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
4035 default:
4036 {
4037 uint32_t idxLbrMsr;
4038 if (VM_IS_VMX_LBR(pVM))
4039 {
4040 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
4041 {
4042 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
4043 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
4044 break;
4045 }
4046 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
4047 {
4048 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
4049 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
4050 break;
4051 }
4052 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
4053 {
4054 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
4055 break;
4056 }
4057 }
4058 pVCpu->cpum.GstCtx.fExtrn = 0;
4059 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
4060 ASMSetFlags(fEFlags);
4061 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
4062 return VERR_HM_UNEXPECTED_LD_ST_MSR;
4063 }
4064 }
4065 }
4066 }
4067#endif
4068
4069 if (a_fWhat & CPUMCTX_EXTRN_CR0)
4070 {
4071 uint64_t u64Cr0;
4072 uint64_t u64Shadow;
4073 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc1);
4074 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc2);
4075#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
4076 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4077 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
4078#else
4079 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4080 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4081 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
4082 else
4083 {
4084 /*
4085 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
4086 * the nested-guest using hardware-assisted VMX. Accordingly we need to
4087 * re-construct CR0. See @bugref{9180#c95} for details.
4088 */
4089 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
4090 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4091 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4092 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
4093 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
4094 }
4095#endif
4096#ifndef IN_NEM_DARWIN
4097 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
4098#endif
4099 CPUMSetGuestCR0(pVCpu, u64Cr0);
4100#ifndef IN_NEM_DARWIN
4101 VMMRZCallRing3Enable(pVCpu);
4102#endif
4103 }
4104
4105 if (a_fWhat & CPUMCTX_EXTRN_CR4)
4106 {
4107 uint64_t u64Cr4;
4108 uint64_t u64Shadow;
4109 int rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc1);
4110 int rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc2);
4111#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
4112 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4113 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
4114#else
4115 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4116 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4117 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
4118 else
4119 {
4120 /*
4121 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
4122 * the nested-guest using hardware-assisted VMX. Accordingly we need to
4123 * re-construct CR4. See @bugref{9180#c95} for details.
4124 */
4125 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
4126 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4127 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4128 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
4129 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
4130 }
4131#endif
4132 pVCpu->cpum.GstCtx.cr4 = u64Cr4;
4133 }
4134
4135 if (a_fWhat & CPUMCTX_EXTRN_CR3)
4136 {
4137 /* CR0.PG bit changes are always intercepted, so it's up to date. */
4138 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
4139 || ( VM_IS_VMX_NESTED_PAGING(pVM)
4140 && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)))
4141 {
4142 uint64_t u64Cr3;
4143 int const rc0 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc0);
4144 if (pVCpu->cpum.GstCtx.cr3 != u64Cr3)
4145 {
4146 pVCpu->cpum.GstCtx.cr3 = u64Cr3;
4147 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
4148 }
4149
4150 /*
4151 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
4152 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
4153 */
4154 if (CPUMIsGuestInPAEModeEx(&pVCpu->cpum.GstCtx))
4155 {
4156 X86PDPE aPaePdpes[4];
4157 int const rc1 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc1);
4158 int const rc2 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc2);
4159 int const rc3 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc3);
4160 int const rc4 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc4);
4161 if (memcmp(&aPaePdpes[0], &pVCpu->cpum.GstCtx.aPaePdpes[0], sizeof(aPaePdpes)))
4162 {
4163 memcpy(&pVCpu->cpum.GstCtx.aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
4164 /* PGM now updates PAE PDPTEs while updating CR3. */
4165 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
4166 }
4167 }
4168 }
4169 }
4170
4171#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4172 if (a_fWhat & CPUMCTX_EXTRN_HWVIRT)
4173 {
4174 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4175 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4176 {
4177 Assert(CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx));
4178 int const rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
4179 AssertRCReturn(rc, rc);
4180 }
4181 }
4182#endif
4183
4184 /* Update fExtrn. */
4185 pVCpu->cpum.GstCtx.fExtrn &= ~a_fWhat;
4186
4187 /* If everything has been imported, clear the HM keeper bit. */
4188 if (!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
4189 {
4190#ifndef IN_NEM_DARWIN
4191 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
4192#else
4193 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
4194#endif
4195 Assert(!pVCpu->cpum.GstCtx.fExtrn);
4196 }
4197
4198 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
4199
4200 /*
4201 * Honor any pending CR3 updates.
4202 *
4203 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
4204 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
4205 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
4206 *
4207 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
4208 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
4209 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
4210 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
4211 *
4212 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
4213 *
4214 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
4215 */
4216#ifndef IN_NEM_DARWIN
4217 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4218 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu))
4219 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu) )
4220 return VINF_SUCCESS;
4221 ASMSetFlags(fEFlags);
4222#else
4223 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4224 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
4225 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) )
4226 return VINF_SUCCESS;
4227 RT_NOREF_PV(fEFlags);
4228#endif
4229
4230 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
4231 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
4232 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
4233 return VINF_SUCCESS;
4234}
4235
4236
4237/**
4238 * Internal state fetcher.
4239 *
4240 * @returns VBox status code.
4241 * @param pVCpu The cross context virtual CPU structure.
4242 * @param pVmcsInfo The VMCS info. object.
4243 * @param pszCaller For logging.
4244 * @tparam a_fWhat What needs to be imported, CPUMCTX_EXTRN_XXX.
4245 * @tparam a_fDoneLocal What's ASSUMED to have been retrieved locally
4246 * already. This is ORed together with @a a_fWhat when
4247 * calculating what needs fetching (just for safety).
4248 * @tparam a_fDonePostExit What's ASSUMED to have been retrieved by
4249 * hmR0VmxPostRunGuest()/nemR3DarwinHandleExitCommon()
4250 * already. This is ORed together with @a a_fWhat when
4251 * calculating what needs fetching (just for safety).
4252 */
4253template<uint64_t const a_fWhat,
4254 uint64_t const a_fDoneLocal = 0,
4255 uint64_t const a_fDonePostExit = 0
4256#ifndef IN_NEM_DARWIN
4257 | CPUMCTX_EXTRN_INHIBIT_INT
4258 | CPUMCTX_EXTRN_INHIBIT_NMI
4259# if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
4260 | HMVMX_CPUMCTX_EXTRN_ALL
4261# elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
4262 | CPUMCTX_EXTRN_RFLAGS
4263# endif
4264#else /* IN_NEM_DARWIN */
4265 | CPUMCTX_EXTRN_ALL /** @todo optimize */
4266#endif /* IN_NEM_DARWIN */
4267>
4268DECLINLINE(int) vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, const char *pszCaller)
4269{
4270 RT_NOREF_PV(pszCaller);
4271 if ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL)
4272 {
4273#ifndef IN_NEM_DARWIN
4274 /*
4275 * We disable interrupts to make the updating of the state and in particular
4276 * the fExtrn modification atomic wrt preemption hooks.
4277 */
4278 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
4279#else
4280 RTCCUINTREG const fEFlags = 0;
4281#endif
4282
4283 /*
4284 * We combine all three parameters and take the (probably) inlined optimized
4285 * code path for the new things specified in a_fWhat.
4286 *
4287 * As a tweak to deal with exits that have INHIBIT_INT/NMI active, causing
4288 * vmxHCImportGuestIntrState to automatically fetch both RIP & RFLAGS, we
4289 * also take the streamlined path when both of these are cleared in fExtrn
4290 * already. vmxHCImportGuestStateInner checks fExtrn before fetching. This
4291 * helps with MWAIT and HLT exits that always inhibit IRQs on many platforms.
4292 */
4293 uint64_t const fWhatToDo = pVCpu->cpum.GstCtx.fExtrn
4294 & ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL);
4295 if (RT_LIKELY( ( fWhatToDo == (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit))
4296 || fWhatToDo == ( a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)
4297 & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)) /* fetch with INHIBIT_INT/NMI */))
4298 && (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)) != 0 /* just in case */)
4299 {
4300 int const rc = vmxHCImportGuestStateInner< a_fWhat
4301 & HMVMX_CPUMCTX_EXTRN_ALL
4302 & ~(a_fDoneLocal | a_fDonePostExit)>(pVCpu, pVmcsInfo, fEFlags);
4303#ifndef IN_NEM_DARWIN
4304 ASMSetFlags(fEFlags);
4305#endif
4306 return rc;
4307 }
4308
4309#ifndef IN_NEM_DARWIN
4310 ASMSetFlags(fEFlags);
4311#endif
4312
4313 /*
4314 * We shouldn't normally get here, but it may happen when executing
4315 * in the debug run-loops. Typically, everything should already have
4316 * been fetched then. Otherwise call the fallback state import function.
4317 */
4318 if (fWhatToDo == 0)
4319 { /* hope the cause was the debug loop or something similar */ }
4320 else
4321 {
4322 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestStateFallback);
4323 Log11Func(("a_fWhat=%#RX64/%#RX64/%#RX64 fExtrn=%#RX64 => %#RX64 - Taking inefficient code path from %s!\n",
4324 a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL, a_fDoneLocal & HMVMX_CPUMCTX_EXTRN_ALL,
4325 a_fDonePostExit & HMVMX_CPUMCTX_EXTRN_ALL, pVCpu->cpum.GstCtx.fExtrn, fWhatToDo, pszCaller));
4326 return vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, a_fWhat | a_fDoneLocal | a_fDonePostExit);
4327 }
4328 }
4329 return VINF_SUCCESS;
4330}
4331
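/*
 * Usage sketch (illustrative only, the template arguments are made up for the example):
 * a VM-exit handler needing RIP and CS that has already fetched RFLAGS locally would do
 *     int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS,
 *                                    CPUMCTX_EXTRN_RFLAGS>(pVCpu, pVmcsInfo, __FUNCTION__);
 * so only the still-external bits take the inlined vmxHCImportGuestStateInner path.
 */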
4332
4333/**
4334 * Check per-VM and per-VCPU force flag actions that require us to go back to
4335 * ring-3 for one reason or another.
4336 *
4337 * @returns Strict VBox status code (i.e. informational status codes too)
4338 * @retval VINF_SUCCESS if we don't have any actions that require going back to
4339 * ring-3.
4340 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
4341 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
4342 * interrupts)
4343 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
4344 * all EMTs to be in ring-3.
4345 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
4346 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
4347 * to the EM loop.
4348 *
4349 * @param pVCpu The cross context virtual CPU structure.
4350 * @param fIsNestedGuest Flag whether this is for a pending nested guest event.
4351 * @param fStepping Whether we are single-stepping the guest using the
4352 * hypervisor debugger.
4353 *
4354 * @remarks This might cause nested-guest VM-exits; the caller must check if the guest
4355 * is no longer in VMX non-root mode.
4356 */
4357static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
4358{
4359#ifndef IN_NEM_DARWIN
4360 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4361#endif
4362
4363 /*
4364 * Update pending interrupts into the APIC's IRR.
4365 */
4366 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4367 APICUpdatePendingInterrupts(pVCpu);
4368
4369 /*
4370 * Anything pending? Should be more likely than not if we're doing a good job.
4371 */
4372 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4373 if ( !fStepping
4374 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
4375 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
4376 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
4377 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4378 return VINF_SUCCESS;
4379
4380 /* Pending PGM CR3 sync. */
4381 if (VMCPU_FF_IS_ANY_SET(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
4382 {
4383 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4384 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
4385 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
4386 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
4387 if (rcStrict != VINF_SUCCESS)
4388 {
4389 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
4390 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
4391 return rcStrict;
4392 }
4393 }
4394
4395 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
4396 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
4397 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4398 {
4399 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
4400 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
4401 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
4402 rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
4403 return rc;
4404 }
4405
4406 /* Pending VM request packets, such as hardware interrupts. */
4407 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
4408 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
4409 {
4410 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
4411 Log4Func(("Pending VM request forcing us back to ring-3\n"));
4412 return VINF_EM_PENDING_REQUEST;
4413 }
4414
4415 /* Pending PGM pool flushes. */
4416 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
4417 {
4418 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
4419 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
4420 return VINF_PGM_POOL_FLUSH_PENDING;
4421 }
4422
4423 /* Pending DMA requests. */
4424 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4425 {
4426 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
4427 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4428 return VINF_EM_RAW_TO_R3;
4429 }
4430
4431#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4432 /*
4433 * Pending nested-guest events.
4434 *
4435 * Please note that the priority of these events is specified and important.
4436 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4437 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
4438 */
4439 if (fIsNestedGuest)
4440 {
4441 /* Pending nested-guest APIC-write. */
4442 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4443 {
4444 Log4Func(("Pending nested-guest APIC-write\n"));
4445 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
4446 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4447 return rcStrict;
4448 }
4449
4450 /* Pending nested-guest monitor-trap flag (MTF). */
4451 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
4452 {
4453 Log4Func(("Pending nested-guest MTF\n"));
4454 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
4455 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4456 return rcStrict;
4457 }
4458
4459 /* Pending nested-guest VMX-preemption timer expired. */
4460 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
4461 {
4462 Log4Func(("Pending nested-guest preempt timer\n"));
4463 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
4464 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4465 return rcStrict;
4466 }
4467 }
4468#else
4469 NOREF(fIsNestedGuest);
4470#endif
4471
4472 return VINF_SUCCESS;
4473}
4474
4475
4476/**
4477 * Converts any TRPM trap into a pending HM event. This is typically used when
4478 * entering from ring-3 (not longjmp returns).
4479 *
4480 * @param pVCpu The cross context virtual CPU structure.
4481 */
4482static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
4483{
4484 Assert(TRPMHasTrap(pVCpu));
4485 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4486
4487 uint8_t uVector;
4488 TRPMEVENT enmTrpmEvent;
4489 uint32_t uErrCode;
4490 RTGCUINTPTR GCPtrFaultAddress;
4491 uint8_t cbInstr;
4492 bool fIcebp;
4493
4494 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4495 AssertRC(rc);
4496
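 /* Compose the event's interruption information: vector in bits 7:0, event type in bits 10:8 and the valid bit (31).
    The error code and fault address are handed to vmxHCSetPendingEvent() separately below. */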
4497 uint32_t u32IntInfo;
4498 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4499 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4500
4501 rc = TRPMResetTrap(pVCpu);
4502 AssertRC(rc);
4503 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4504 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4505
4506 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4507}
4508
4509
4510/**
4511 * Converts the pending HM event into a TRPM trap.
4512 *
4513 * @param pVCpu The cross context virtual CPU structure.
4514 */
4515static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4516{
4517 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4518
4519 /* If a trap was already pending, we did something wrong! */
4520 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4521
4522 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4523 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4524 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4525
4526 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4527
4528 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4529 AssertRC(rc);
4530
4531 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4532 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4533
4534 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4535 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4536 else
4537 {
4538 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4539 switch (uVectorType)
4540 {
4541 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4542 TRPMSetTrapDueToIcebp(pVCpu);
4543 RT_FALL_THRU();
4544 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4545 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4546 {
4547 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4548 || ( uVector == X86_XCPT_BP /* INT3 */
4549 || uVector == X86_XCPT_OF /* INTO */
4550 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4551 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4552 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4553 break;
4554 }
4555 }
4556 }
4557
4558 /* We're now done converting the pending event. */
4559 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4560}
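/*
 * Illustrative only -- the two helpers above are used as a pair around ring-3 round trips, roughly like this
 * (simplified sketch, not the actual call sites):
 *     if (TRPMHasTrap(pVCpu))                      // coming back from ring-3 with a TRPM trap
 *         vmxHCTrpmTrapToPendingEvent(pVCpu);
 *     ...
 *     if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)   // leaving for ring-3 with an undelivered event
 *         vmxHCPendingEventToTrpmTrap(pVCpu);
 */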
4561
4562
4563/**
4564 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4565 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4566 *
4567 * @param pVCpu The cross context virtual CPU structure.
4568 * @param pVmcsInfo The VMCS info. object.
4569 */
4570static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4571{
4572 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4573 {
4574 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4575 {
4576 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4577 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4578 AssertRC(rc);
4579 }
4580 } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4581}
4582
4583
4584/**
4585 * Clears the interrupt-window exiting control in the VMCS.
4586 *
4587 * @param pVCpu The cross context virtual CPU structure.
4588 * @param pVmcsInfo The VMCS info. object.
4589 */
4590DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4591{
4592 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4593 {
4594 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4595 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4596 AssertRC(rc);
4597 }
4598}
4599
4600
4601/**
4602 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4603 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4604 *
4605 * @param pVCpu The cross context virtual CPU structure.
4606 * @param pVmcsInfo The VMCS info. object.
4607 */
4608static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4609{
4610 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4611 {
4612 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4613 {
4614 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4615 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4616 AssertRC(rc);
4617 Log4Func(("Setup NMI-window exiting\n"));
4618 }
4619 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4620}
4621
4622
4623/**
4624 * Clears the NMI-window exiting control in the VMCS.
4625 *
4626 * @param pVCpu The cross context virtual CPU structure.
4627 * @param pVmcsInfo The VMCS info. object.
4628 */
4629DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4630{
4631 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4632 {
4633 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4634 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4635 AssertRC(rc);
4636 }
4637}
4638
4639
4640/**
4641 * Injects an event into the guest upon VM-entry by updating the relevant fields
4642 * in the VM-entry area in the VMCS.
4643 *
4644 * @returns Strict VBox status code (i.e. informational status codes too).
4645 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4646 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4647 *
4648 * @param pVCpu The cross context virtual CPU structure.
4649 * @param pVmcsInfo The VMCS info object.
4650 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
4651 * @param pEvent The event being injected.
4652 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4653 * will be updated if necessary. This cannot be NULL.
4654 * @param fStepping Whether we're single-stepping guest execution and should
4655 * return VINF_EM_DBG_STEPPED if the event is injected
4656 * directly (registers modified by us, not by hardware on
4657 * VM-entry).
4658 */
4659static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4660 bool fStepping, uint32_t *pfIntrState)
4661{
4662 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4663 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4664 Assert(pfIntrState);
4665
4666#ifdef IN_NEM_DARWIN
4667 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4668#endif
4669
4670 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4671 uint32_t u32IntInfo = pEvent->u64IntInfo;
4672 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4673 uint32_t const cbInstr = pEvent->cbInstr;
4674 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4675 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4676 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4677
4678#ifdef VBOX_STRICT
4679 /*
4680 * Validate the error-code-valid bit for hardware exceptions.
4681 * No error codes for exceptions in real-mode.
4682 *
4683 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4684 */
4685 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4686 && !CPUMIsGuestInRealModeEx(pCtx))
4687 {
4688 switch (uVector)
4689 {
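 /* These are the exceptions that deliver an error code. */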
4690 case X86_XCPT_PF:
4691 case X86_XCPT_DF:
4692 case X86_XCPT_TS:
4693 case X86_XCPT_NP:
4694 case X86_XCPT_SS:
4695 case X86_XCPT_GP:
4696 case X86_XCPT_AC:
4697 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4698 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4699 RT_FALL_THRU();
4700 default:
4701 break;
4702 }
4703 }
4704
4705 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4706 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4707 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4708#endif
4709
4710 RT_NOREF(uVector);
4711 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4712 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4713 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4714 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4715 {
4716 Assert(uVector <= X86_XCPT_LAST);
4717 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4718 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4719 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4720 }
4721 else
4722 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4723
4724 /*
4725 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4726 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4727 * interrupt handler in the (real-mode) guest.
4728 *
4729 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4730 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4731 */
4732 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4733 {
4734#ifndef IN_NEM_DARWIN
4735 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4736#endif
4737 {
4738 /*
4739 * For CPUs with unrestricted guest execution enabled and with the guest
4740 * in real-mode, we must not set the deliver-error-code bit.
4741 *
4742 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4743 */
4744 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4745 }
4746#ifndef IN_NEM_DARWIN
4747 else
4748 {
4749 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4750 Assert(PDMVmmDevHeapIsEnabled(pVM));
4751 Assert(pVM->hm.s.vmx.pRealModeTSS);
4752 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4753
4754 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4755 int rc2 = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4756 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4757 AssertRCReturn(rc2, rc2);
4758
4759 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4760 size_t const cbIdtEntry = sizeof(X86IDTR16);
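 /* For instance, vector 8 (#DF) occupies IVT bytes 32..35, so an IDT limit below 35 means the entry is absent. */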
4761 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4762 {
4763 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4764 if (uVector == X86_XCPT_DF)
4765 return VINF_EM_RESET;
4766
4767 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4768 No error codes for exceptions in real-mode. */
4769 if (uVector == X86_XCPT_GP)
4770 {
4771 static HMEVENT const s_EventXcptDf
4772 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4773 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4774 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4775 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4776 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
4777 }
4778
4779 /*
4780 * If we're injecting an event with no valid IDT entry, inject a #GP.
4781 * No error codes for exceptions in real-mode.
4782 *
4783 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4784 */
4785 static HMEVENT const s_EventXcptGp
4786 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4787 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4788 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4789 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4790 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
4791 }
4792
4793 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4794 uint16_t uGuestIp = pCtx->ip;
4795 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4796 {
4797 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4798 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
4799 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4800 }
4801 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4802 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4803
4804 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4805 X86IDTR16 IdtEntry;
4806 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4807 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4808 AssertRCReturn(rc2, rc2);
4809
4810 /* Construct the stack frame for the interrupt/exception handler. */
4811 VBOXSTRICTRC rcStrict;
4812 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, (uint16_t)pCtx->eflags.u);
4813 if (rcStrict == VINF_SUCCESS)
4814 {
4815 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4816 if (rcStrict == VINF_SUCCESS)
4817 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4818 }
4819
4820 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4821 if (rcStrict == VINF_SUCCESS)
4822 {
4823 pCtx->eflags.u &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4824 pCtx->rip = IdtEntry.offSel;
4825 pCtx->cs.Sel = IdtEntry.uSel;
4826 pCtx->cs.ValidSel = IdtEntry.uSel;
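 /* Real-mode code segment base = selector * 16 (the shift count of 4 happens to equal cbIdtEntry). */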
4827 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4828 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4829 && uVector == X86_XCPT_PF)
4830 pCtx->cr2 = GCPtrFault;
4831
4832 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4833 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4834 | HM_CHANGED_GUEST_RSP);
4835
4836 /*
4837 * If we delivered a hardware exception (other than an NMI) and if there was
4838 * block-by-STI in effect, we should clear it.
4839 */
4840 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4841 {
4842 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4843 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4844 Log4Func(("Clearing inhibition due to STI\n"));
4845 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4846 }
4847
4848 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4849 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4850
4851 /*
4852 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4853 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4854 */
4855 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4856
4857 /*
4858 * If we eventually support nested-guest execution without unrestricted guest execution,
4859 * we should set fInterceptEvents here.
4860 */
4861 Assert(!fIsNestedGuest);
4862
4863 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4864 if (fStepping)
4865 rcStrict = VINF_EM_DBG_STEPPED;
4866 }
4867 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4868 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4869 return rcStrict;
4870 }
4871#else
4872 RT_NOREF(pVmcsInfo);
4873#endif
4874 }
4875
4876 /*
4877 * Validate.
4878 */
4879 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4880 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4881
4882 /*
4883 * Inject the event into the VMCS.
4884 */
4885 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4886 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4887 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4888 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4889 AssertRC(rc);
4890
4891 /*
4892 * Update guest CR2 if this is a page-fault.
4893 */
4894 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4895 pCtx->cr2 = GCPtrFault;
4896
4897 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4898 return VINF_SUCCESS;
4899}
4900
4901
4902/**
4903 * Evaluates the event to be delivered to the guest and sets it as the pending
4904 * event.
4905 *
4906 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4907 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4908 * NOT restore these force-flags.
4909 *
4910 * @returns Strict VBox status code (i.e. informational status codes too).
4911 * @param pVCpu The cross context virtual CPU structure.
4912 * @param pVmcsInfo The VMCS information structure.
4913 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
4914 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4915 */
4916static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4917{
4918 Assert(pfIntrState);
4919 Assert(!TRPMHasTrap(pVCpu));
4920
4921 /*
4922 * Compute/update guest-interruptibility state related FFs.
4923 * The FFs will be used below while evaluating events to be injected.
4924 */
4925 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4926
4927 /*
4928 * Evaluate if a new event needs to be injected.
4929 * An event that's already pending has already performed all necessary checks.
4930 */
4931 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4932 && !CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
4933 {
4934 /** @todo SMI. SMIs take priority over NMIs. */
4935
4936 /*
4937 * NMIs.
4938 * NMIs take priority over external interrupts.
4939 */
4940#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4941 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4942#endif
4943 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4944 {
4945 /*
4946 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4947 *
4948 * For a nested-guest, the FF always indicates the outer guest's ability to
4949 * receive an NMI while the guest-interruptibility state bit depends on whether
4950 * the nested-hypervisor is using virtual-NMIs.
4951 */
4952 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
4953 {
4954#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4955 if ( fIsNestedGuest
4956 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4957 return IEMExecVmxVmexitXcptNmi(pVCpu);
4958#endif
4959 vmxHCSetPendingXcptNmi(pVCpu);
4960 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4961 Log4Func(("NMI pending injection\n"));
4962
4963 /* We've injected the NMI, bail. */
4964 return VINF_SUCCESS;
4965 }
4966 if (!fIsNestedGuest)
4967 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4968 }
4969
4970 /*
4971 * External interrupts (PIC/APIC).
4972 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4973 * We cannot re-request the interrupt from the controller again.
4974 */
4975 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4976 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4977 {
4978 Assert(!DBGFIsStepping(pVCpu));
4979 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4980 AssertRC(rc);
4981
4982 /*
4983 * We must not check EFLAGS directly when executing a nested-guest, use
4984 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
4985 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4986 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4987 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4988 *
4989 * See Intel spec. 25.4.1 "Event Blocking".
4990 */
4991 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4992 {
4993#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4994 if ( fIsNestedGuest
4995 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4996 {
4997 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4998 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4999 return rcStrict;
5000 }
5001#endif
5002 uint8_t u8Interrupt;
5003 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
5004 if (RT_SUCCESS(rc))
5005 {
5006#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5007 if ( fIsNestedGuest
5008 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
5009 {
5010 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
5011 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5012 return rcStrict;
5013 }
5014#endif
5015 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
5016 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
5017 }
5018 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
5019 {
5020 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
5021
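 /* Program the TPR threshold with the priority class (bits 7:4) of the masked interrupt so we get a
    TPR-below-threshold VM-exit, and thus another chance to deliver it, once the TPR drops far enough. */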
5022 if ( !fIsNestedGuest
5023 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
5024 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
5025 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
5026
5027 /*
5028 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
5029 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
5030 * need to re-set this force-flag here.
5031 */
5032 }
5033 else
5034 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
5035
5036 /* We've injected the interrupt or taken necessary action, bail. */
5037 return VINF_SUCCESS;
5038 }
5039 if (!fIsNestedGuest)
5040 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
5041 }
5042 }
5043 else if (!fIsNestedGuest)
5044 {
5045 /*
5046 * An event is being injected or we are in an interrupt shadow. Check if another event is
5047 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
5048 * the pending event.
5049 */
5050 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
5051 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
5052 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
5053 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5054 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
5055 }
5056 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
5057
5058 return VINF_SUCCESS;
5059}
5060
5061
5062/**
5063 * Injects any pending events into the guest if the guest is in a state to
5064 * receive them.
5065 *
5066 * @returns Strict VBox status code (i.e. informational status codes too).
5067 * @param pVCpu The cross context virtual CPU structure.
5068 * @param pVmcsInfo The VMCS information structure.
5069 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
5070 * @param fIntrState The VT-x guest-interruptibility state.
5071 * @param fStepping Whether we are single-stepping the guest using the
5072 * hypervisor debugger and should return
5073 * VINF_EM_DBG_STEPPED if the event was dispatched
5074 * directly.
5075 */
5076static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
5077 uint32_t fIntrState, bool fStepping)
5078{
5079 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
5080#ifndef IN_NEM_DARWIN
5081 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5082#endif
5083
5084#ifdef VBOX_STRICT
5085 /*
5086 * Verify guest-interruptibility state.
5087 *
5088 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
5089 * since injecting an event may modify the interruptibility state and we must thus always
5090 * use fIntrState.
5091 */
5092 {
5093 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
5094 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
5095 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
5096 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5097 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet. */
5098 Assert(!TRPMHasTrap(pVCpu));
5099 NOREF(fBlockMovSS); NOREF(fBlockSti);
5100 }
5101#endif
5102
5103 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5104 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5105 {
5106 /*
5107 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
5108 * pending even while injecting an event and in this case, we want a VM-exit as soon as
5109 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
5110 *
5111 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5112 */
5113 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
5114#ifdef VBOX_STRICT
5115 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5116 {
5117 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
5118 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5119 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5120 }
5121 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5122 {
5123 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
5124 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5125 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5126 }
5127#endif
5128 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5129 uIntType));
5130
5131 /*
5132 * Inject the event and get any changes to the guest-interruptibility state.
5133 *
5134 * The guest-interruptibility state may need to be updated if we inject the event
5135 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
5136 */
5137 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
5138 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
5139
5140 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5141 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
5142 else
5143 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
5144 }
5145
5146 /*
5147 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
5148 * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
5149 */
5150 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5151 && !fIsNestedGuest)
5152 {
5153 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
5154
5155 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5156 {
5157 /*
5158 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
5159 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
5160 */
5161 Assert(!DBGFIsStepping(pVCpu));
5162 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_TF);
5163 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
5164 fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
5165 AssertRC(rc);
5166 }
5167 else
5168 {
5169 /*
5170 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
5171 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
5172 * we take care of this case in vmxHCExportSharedDebugState and also the case if
5173 * we use MTF, so just make sure it's called before executing guest-code.
5174 */
5175 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
5176 }
5177 }
5178 /* else: for nested-guests this is currently handled while merging controls. */
5179
5180 /*
5181 * Finally, update the guest-interruptibility state.
5182 *
5183 * This is required for the real-on-v86 software interrupt injection, for
5184 * pending debug exceptions, as well as for updates to the guest state from ring-3 (IEM).
5185 */
5186 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
5187 AssertRC(rc);
5188
5189 /*
5190 * There's no need to clear the VM-entry interruption-information field here if we're not
5191 * injecting anything. VT-x clears the valid bit on every VM-exit.
5192 *
5193 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
5194 */
5195
5196 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
5197 return rcStrict;
5198}
5199
5200
5201/**
5202 * Tries to determine what part of the guest-state VT-x has deemed as invalid
5203 * and update error record fields accordingly.
5204 *
5205 * @returns VMX_IGS_* error codes.
5206 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
5207 * wrong with the guest state.
5208 *
5209 * @param pVCpu The cross context virtual CPU structure.
5210 * @param pVmcsInfo The VMCS info. object.
5211 *
5212 * @remarks This function assumes our cache of the VMCS controls
5213 * is valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
5214 */
5215static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
5216{
5217#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
5218#define HMVMX_CHECK_BREAK(expr, err) do { \
5219 if (!(expr)) { uError = (err); break; } \
5220 } while (0)
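/* Note: both macros rely on the local 'uError' variable and on being used inside the do { ... } while (0) loop below. */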
5221
5222 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5223 uint32_t uError = VMX_IGS_ERROR;
5224 uint32_t u32IntrState = 0;
5225#ifndef IN_NEM_DARWIN
5226 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5227 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
5228#else
5229 bool const fUnrestrictedGuest = true;
5230#endif
5231 do
5232 {
5233 int rc;
5234
5235 /*
5236 * Guest-interruptibility state.
5237 *
5238 * Read this first so that, even if a check fails before the ones that actually
5239 * require the guest-interruptibility state, the error record still reflects the
5240 * correct VMCS value and avoids causing further confusion.
5241 */
5242 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
5243 AssertRC(rc);
5244
5245 uint32_t u32Val;
5246 uint64_t u64Val;
5247
5248 /*
5249 * CR0.
5250 */
5251 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5252 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
5253 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
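 /* Per Intel spec. appendix A.7, a CR0 bit must be 1 if it is set in CR0_FIXED0 and must be 0 if it is clear in
    CR0_FIXED1, making fSetCr0 the must-be-one mask and ~fZapCr0 the must-be-zero mask. The AND/OR above presumably
    just guards against inconsistent fixed-bit MSR values (see the @todo). */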
5254 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
5255 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
5256 if (fUnrestrictedGuest)
5257 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
5258
5259 uint64_t u64GuestCr0;
5260 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
5261 AssertRC(rc);
5262 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
5263 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
5264 if ( !fUnrestrictedGuest
5265 && (u64GuestCr0 & X86_CR0_PG)
5266 && !(u64GuestCr0 & X86_CR0_PE))
5267 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
5268
5269 /*
5270 * CR4.
5271 */
5272 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5273 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
5274 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
5275
5276 uint64_t u64GuestCr4;
5277 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
5278 AssertRC(rc);
5279 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
5280 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
5281
5282 /*
5283 * IA32_DEBUGCTL MSR.
5284 */
5285 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
5286 AssertRC(rc);
5287 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5288 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
5289 {
5290 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
5291 }
5292 uint64_t u64DebugCtlMsr = u64Val;
5293
5294#ifdef VBOX_STRICT
5295 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
5296 AssertRC(rc);
5297 Assert(u32Val == pVmcsInfo->u32EntryCtls);
5298#endif
5299 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5300
5301 /*
5302 * RIP and RFLAGS.
5303 */
5304 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
5305 AssertRC(rc);
5306 /* pCtx->rip can differ from the value in the VMCS (e.g. after running guest code and taking VM-exits that don't update it). */
5307 if ( !fLongModeGuest
5308 || !pCtx->cs.Attr.n.u1Long)
5309 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
5310 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
5311 * must be identical if the "IA-32e mode guest" VM-entry
5312 * control is 1 and CS.L is 1. No check applies if the
5313 * CPU supports 64 linear-address bits. */
5314
5315 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
5316 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
5317 AssertRC(rc);
5318 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bits 63:22, bit 15, bit 5 and bit 3 MBZ. */
5319 VMX_IGS_RFLAGS_RESERVED);
5320 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
5321 uint32_t const u32Eflags = u64Val;
5322
5323 if ( fLongModeGuest
5324 || ( fUnrestrictedGuest
5325 && !(u64GuestCr0 & X86_CR0_PE)))
5326 {
5327 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
5328 }
5329
5330 uint32_t u32EntryInfo;
5331 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
5332 AssertRC(rc);
5333 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5334 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
5335
5336 /*
5337 * 64-bit checks.
5338 */
5339 if (fLongModeGuest)
5340 {
5341 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
5342 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
5343 }
5344
5345 if ( !fLongModeGuest
5346 && (u64GuestCr4 & X86_CR4_PCIDE))
5347 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
5348
5349 /** @todo CR3 field must be such that bits 63:52 and bits in the range
5350 * 51:32 beyond the processor's physical-address width are 0. */
5351
5352 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5353 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
5354 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
5355
5356#ifndef IN_NEM_DARWIN
5357 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
5358 AssertRC(rc);
5359 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
5360
5361 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
5362 AssertRC(rc);
5363 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
5364#endif
5365
5366 /*
5367 * PERF_GLOBAL MSR.
5368 */
5369 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
5370 {
5371 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
5372 AssertRC(rc);
5373 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
5374 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
5375 }
5376
5377 /*
5378 * PAT MSR.
5379 */
5380 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5381 {
5382 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
5383 AssertRC(rc);
5384 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
5385 for (unsigned i = 0; i < 8; i++)
5386 {
5387 uint8_t u8Val = (u64Val & 0xff);
5388 if ( u8Val != 0 /* UC */
5389 && u8Val != 1 /* WC */
5390 && u8Val != 4 /* WT */
5391 && u8Val != 5 /* WP */
5392 && u8Val != 6 /* WB */
5393 && u8Val != 7 /* UC- */)
5394 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
5395 u64Val >>= 8;
5396 }
5397 }
5398
5399 /*
5400 * EFER MSR.
5401 */
5402 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5403 {
5404 Assert(g_fHmVmxSupportsVmcsEfer);
5405 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
5406 AssertRC(rc);
5407 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
5408 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
5409 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
5410 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
5411 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
5412 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
5413 * iemVmxVmentryCheckGuestState(). */
5414 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5415 || !(u64GuestCr0 & X86_CR0_PG)
5416 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
5417 VMX_IGS_EFER_LMA_LME_MISMATCH);
5418 }
5419
5420 /*
5421 * Segment registers.
5422 */
5423 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5424 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
5425 if (!(u32Eflags & X86_EFL_VM))
5426 {
5427 /* CS */
5428 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
5429 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
5430 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
5431 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5432 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5433 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
5434 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5435 /* CS cannot be loaded with NULL in protected mode. */
5436 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
5437 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
5438 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5439 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
5440 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5441 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
5442 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
5443 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
5444 else
5445 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
5446
5447 /* SS */
5448 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5449 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
5450 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
5451 if ( !(pCtx->cr0 & X86_CR0_PE)
5452 || pCtx->cs.Attr.n.u4Type == 3)
5453 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
5454
5455 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5456 {
5457 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
5458 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
5459 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
5460 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
5461 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5462 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5463 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
5464 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5465 }
5466
5467 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
5468 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5469 {
5470 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
5471 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
5472 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5473 || pCtx->ds.Attr.n.u4Type > 11
5474 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5475 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
5476 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
5477 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5478 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5479 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
5480 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5481 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5482 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
5483 }
5484 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5485 {
5486 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
5487 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
5488 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5489 || pCtx->es.Attr.n.u4Type > 11
5490 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5491 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5492 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5493 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5494 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5495 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5496 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5497 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5498 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5499 }
5500 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5501 {
5502 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5503 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5504 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5505 || pCtx->fs.Attr.n.u4Type > 11
5506 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5507 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5508 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5509 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5510 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5511 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5512 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5513 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5514 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5515 }
5516 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5517 {
5518 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5519 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5520 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5521 || pCtx->gs.Attr.n.u4Type > 11
5522 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5523 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5524 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5525 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5526 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5527 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5528 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5529 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5530 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5531 }
5532 /* 64-bit capable CPUs. */
5533 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5534 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5535 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5536 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5537 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5538 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5539 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5540 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5541 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5542 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5543 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5544 }
5545 else
5546 {
5547 /* V86 mode checks. */
5548 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5549 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5550 {
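 /* 0xf3 = present, DPL=3, accessed read/write data segment -- the attribute value segments must have in
    virtual-8086 mode. */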
5551 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5552 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5553 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5554 }
5555 else
5556 {
5557 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5558 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5559 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5560 }
5561
5562 /* CS */
5563 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5564 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5565 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5566 /* SS */
5567 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5568 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5569 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5570 /* DS */
5571 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5572 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5573 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5574 /* ES */
5575 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5576 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5577 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5578 /* FS */
5579 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5580 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5581 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5582 /* GS */
5583 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5584 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5585 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5586 /* 64-bit capable CPUs. */
5587 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5588 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5589 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5590 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5591 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5592 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5593 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5594 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5595 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5596 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5597 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5598 }
5599
5600 /*
5601 * TR.
5602 */
5603 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5604 /* 64-bit capable CPUs. */
5605 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5606 if (fLongModeGuest)
5607 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5608 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5609 else
5610 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5611 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS. */
5612 VMX_IGS_TR_ATTR_TYPE_INVALID);
5613 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5614 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5615 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5616 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5617 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5618 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5619 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5620 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5621
5622 /*
5623 * GDTR and IDTR (64-bit capable checks).
5624 */
5625 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5626 AssertRC(rc);
5627 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5628
5629 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5630 AssertRC(rc);
5631 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5632
5633 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5634 AssertRC(rc);
5635 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5636
5637 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5638 AssertRC(rc);
5639 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5640
5641 /*
5642 * Guest Non-Register State.
5643 */
5644 /* Activity State. */
5645 uint32_t u32ActivityState;
5646 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5647 AssertRC(rc);
5648 HMVMX_CHECK_BREAK( !u32ActivityState
5649 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5650 VMX_IGS_ACTIVITY_STATE_INVALID);
5651 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5652 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5653
5654 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5655 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5656 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5657
5658 /** @todo Activity state and injecting interrupts. Left as a todo since we
5659 * currently don't use activity states but ACTIVE. */
5660
5661 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5662 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5663
5664 /* Guest interruptibility-state. */
5665 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5666 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5667 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5668 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5669 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5670 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5671 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5672 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5673 {
5674 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5675 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5676 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5677 }
5678 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5679 {
5680 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5681 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5682 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5683 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5684 }
5685 /** @todo Assumes the processor is not in SMM. */
5686 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5687 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5688 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5689 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5690 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5691 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5692 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5693 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5694
5695 /* Pending debug exceptions. */
5696 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5697 AssertRC(rc);
5698 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5699 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5700 u32Val = u64Val; /* For pending debug exceptions checks below. */
5701
5702 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5703 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5704 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5705 {
5706 if ( (u32Eflags & X86_EFL_TF)
5707 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5708 {
5709 /* Bit 14 is PendingDebug.BS. */
5710 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5711 }
5712 if ( !(u32Eflags & X86_EFL_TF)
5713 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5714 {
5715 /* Bit 14 is PendingDebug.BS. */
5716 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5717 }
5718 }
5719
5720#ifndef IN_NEM_DARWIN
5721 /* VMCS link pointer. */
5722 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5723 AssertRC(rc);
5724 if (u64Val != UINT64_C(0xffffffffffffffff))
5725 {
5726 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5727 /** @todo Bits beyond the processor's physical-address width MBZ. */
5728 /** @todo SMM checks. */
5729 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5730 Assert(pVmcsInfo->pvShadowVmcs);
5731 VMXVMCSREVID VmcsRevId;
5732 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5733 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5734 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5735 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5736 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5737 }
5738
5739 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5740 * not using nested paging? */
5741 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5742 && !fLongModeGuest
5743 && CPUMIsGuestInPAEModeEx(pCtx))
5744 {
5745 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5746 AssertRC(rc);
5747 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5748
5749 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5750 AssertRC(rc);
5751 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5752
5753 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5754 AssertRC(rc);
5755 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5756
5757 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5758 AssertRC(rc);
5759 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5760 }
5761#endif
5762
5763 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5764 if (uError == VMX_IGS_ERROR)
5765 uError = VMX_IGS_REASON_NOT_FOUND;
5766 } while (0);
5767
5768 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5769 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5770 return uError;
5771
5772#undef HMVMX_ERROR_BREAK
5773#undef HMVMX_CHECK_BREAK
5774}
5775
5776
5777#ifndef HMVMX_USE_FUNCTION_TABLE
5778/**
5779 * Handles a guest VM-exit from hardware-assisted VMX execution.
5780 *
5781 * @returns Strict VBox status code (i.e. informational status codes too).
5782 * @param pVCpu The cross context virtual CPU structure.
5783 * @param pVmxTransient The VMX-transient structure.
5784 */
5785DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5786{
5787#ifdef DEBUG_ramshankar
5788# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5789 do { \
5790 if (a_fSave != 0) \
5791 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5792 VBOXSTRICTRC rcStrict = a_CallExpr; \
5793 if (a_fSave != 0) \
5794 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5795 return rcStrict; \
5796 } while (0)
5797#else
5798# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5799#endif
5800 uint32_t const uExitReason = pVmxTransient->uExitReason;
5801 switch (uExitReason)
5802 {
5803 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5804 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5805 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5806 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5807 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5808 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5809 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5810 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5811 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5812 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5813 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5814 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5815 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5816 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5817 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5818 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5819 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5820 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5821 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5822 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5823 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5824 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5825 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5826 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5827 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5828 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5829 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5830 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5831 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5832 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5833#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5834 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5835 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5836 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5837 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5838 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5839            case VMX_EXIT_VMRESUME:                VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5840            case VMX_EXIT_VMWRITE:                 VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5841 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5842 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5843 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5844#else
5845 case VMX_EXIT_VMCLEAR:
5846 case VMX_EXIT_VMLAUNCH:
5847 case VMX_EXIT_VMPTRLD:
5848 case VMX_EXIT_VMPTRST:
5849 case VMX_EXIT_VMREAD:
5850 case VMX_EXIT_VMRESUME:
5851 case VMX_EXIT_VMWRITE:
5852 case VMX_EXIT_VMXOFF:
5853 case VMX_EXIT_VMXON:
5854 case VMX_EXIT_INVVPID:
5855 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5856#endif
5857#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5858 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5859#else
5860 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5861#endif
5862
5863 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5864 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5865 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5866
5867 case VMX_EXIT_INIT_SIGNAL:
5868 case VMX_EXIT_SIPI:
5869 case VMX_EXIT_IO_SMI:
5870 case VMX_EXIT_SMI:
5871 case VMX_EXIT_ERR_MSR_LOAD:
5872 case VMX_EXIT_ERR_MACHINE_CHECK:
5873 case VMX_EXIT_PML_FULL:
5874 case VMX_EXIT_VIRTUALIZED_EOI:
5875 case VMX_EXIT_GDTR_IDTR_ACCESS:
5876 case VMX_EXIT_LDTR_TR_ACCESS:
5877 case VMX_EXIT_APIC_WRITE:
5878 case VMX_EXIT_RDRAND:
5879 case VMX_EXIT_RSM:
5880 case VMX_EXIT_VMFUNC:
5881 case VMX_EXIT_ENCLS:
5882 case VMX_EXIT_RDSEED:
5883 case VMX_EXIT_XSAVES:
5884 case VMX_EXIT_XRSTORS:
5885 case VMX_EXIT_UMWAIT:
5886 case VMX_EXIT_TPAUSE:
5887 case VMX_EXIT_LOADIWKEY:
5888 default:
5889 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5890 }
5891#undef VMEXIT_CALL_RET
5892}
5893#endif /* !HMVMX_USE_FUNCTION_TABLE */
5894
5895
5896#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5897/**
5898 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5899 *
5900 * @returns Strict VBox status code (i.e. informational status codes too).
5901 * @param pVCpu The cross context virtual CPU structure.
5902 * @param pVmxTransient The VMX-transient structure.
5903 */
5904DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5905{
5906 uint32_t const uExitReason = pVmxTransient->uExitReason;
5907 switch (uExitReason)
5908 {
5909# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5910 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient);
5911 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolationNested(pVCpu, pVmxTransient);
5912# else
5913 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
5914 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
5915# endif
5916 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5917 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5918 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5919
5920 /*
5921 * We shouldn't direct host physical interrupts to the nested-guest.
5922 */
5923 case VMX_EXIT_EXT_INT:
5924 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5925
5926 /*
5927         * Instructions that cause VM-exits unconditionally or where the VM-exit
5928         * condition is taken solely from the nested hypervisor (meaning if the VM-exit
5929         * happens, it's guaranteed to be a nested-guest VM-exit).
5930 *
5931 * - Provides VM-exit instruction length ONLY.
5932 */
5933 case VMX_EXIT_CPUID: /* Unconditional. */
5934 case VMX_EXIT_VMCALL:
5935 case VMX_EXIT_GETSEC:
5936 case VMX_EXIT_INVD:
5937 case VMX_EXIT_XSETBV:
5938 case VMX_EXIT_VMLAUNCH:
5939 case VMX_EXIT_VMRESUME:
5940 case VMX_EXIT_VMXOFF:
5941 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5942 case VMX_EXIT_VMFUNC:
5943 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5944
5945 /*
5946         * Instructions that cause VM-exits unconditionally or where the VM-exit
5947         * condition is taken solely from the nested hypervisor (meaning if the VM-exit
5948         * happens, it's guaranteed to be a nested-guest VM-exit).
5949 *
5950 * - Provides VM-exit instruction length.
5951 * - Provides VM-exit information.
5952 * - Optionally provides Exit qualification.
5953 *
5954 * Since Exit qualification is 0 for all VM-exits where it is not
5955 * applicable, reading and passing it to the guest should produce
5956 * defined behavior.
5957 *
5958 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5959 */
5960 case VMX_EXIT_INVEPT: /* Unconditional. */
5961 case VMX_EXIT_INVVPID:
5962 case VMX_EXIT_VMCLEAR:
5963 case VMX_EXIT_VMPTRLD:
5964 case VMX_EXIT_VMPTRST:
5965 case VMX_EXIT_VMXON:
5966 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5967 case VMX_EXIT_LDTR_TR_ACCESS:
5968 case VMX_EXIT_RDRAND:
5969 case VMX_EXIT_RDSEED:
5970 case VMX_EXIT_XSAVES:
5971 case VMX_EXIT_XRSTORS:
5972 case VMX_EXIT_UMWAIT:
5973 case VMX_EXIT_TPAUSE:
5974 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5975
5976 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5977 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5978 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5979 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5980 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
5981 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
5982 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
5983 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
5984 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
5985 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
5986 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
5987 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
5988 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
5989 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
5990 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
5991 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
5992 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
5993 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
5994 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
5995
5996 case VMX_EXIT_PREEMPT_TIMER:
5997 {
5998 /** @todo NSTVMX: Preempt timer. */
5999 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
6000 }
6001
6002 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
6003 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
6004
6005 case VMX_EXIT_VMREAD:
6006 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
6007
6008 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
6009 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
6010
6011 case VMX_EXIT_INIT_SIGNAL:
6012 case VMX_EXIT_SIPI:
6013 case VMX_EXIT_IO_SMI:
6014 case VMX_EXIT_SMI:
6015 case VMX_EXIT_ERR_MSR_LOAD:
6016 case VMX_EXIT_ERR_MACHINE_CHECK:
6017 case VMX_EXIT_PML_FULL:
6018 case VMX_EXIT_RSM:
6019 default:
6020 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
6021 }
6022}
6023#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6024
6025
6026/** @name VM-exit helpers.
6027 * @{
6028 */
6029/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6030/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6031/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6032
6033/** Macro for VM-exits called unexpectedly. */
6034#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
6035 do { \
6036 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
6037 return VERR_VMX_UNEXPECTED_EXIT; \
6038 } while (0)
6039
6040#ifdef VBOX_STRICT
6041# ifndef IN_NEM_DARWIN
6042/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
6043# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
6044 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6045
6046# define HMVMX_ASSERT_PREEMPT_CPUID() \
6047 do { \
6048 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6049 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6050 } while (0)
6051
6052# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6053 do { \
6054 AssertPtr((a_pVCpu)); \
6055 AssertPtr((a_pVmxTransient)); \
6056 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6057 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6058 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6059 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6060 Assert((a_pVmxTransient)->pVmcsInfo); \
6061 Assert(ASMIntAreEnabled()); \
6062 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6063 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
6064 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6065 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6066 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
6067 HMVMX_ASSERT_PREEMPT_CPUID(); \
6068 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6069 } while (0)
6070# else
6071# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
6072# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
6073# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6074 do { \
6075 AssertPtr((a_pVCpu)); \
6076 AssertPtr((a_pVmxTransient)); \
6077 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6078 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6079 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6080 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6081 Assert((a_pVmxTransient)->pVmcsInfo); \
6082 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6083 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6084 } while (0)
6085# endif
6086
6087# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6088 do { \
6089 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
6090 Assert((a_pVmxTransient)->fIsNestedGuest); \
6091 } while (0)
6092
6093# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6094 do { \
6095 Log4Func(("\n")); \
6096 } while (0)
6097#else
6098# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6099 do { \
6100 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6101 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
6102 } while (0)
6103
6104# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6105 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
6106
6107# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
6108#endif
6109
6110#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6111/** Macro that does the necessary privilege checks and intercepted VM-exits for
6112 * guests that attempted to execute a VMX instruction. */
6113# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
6114 do \
6115 { \
6116 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
6117 if (rcStrictTmp == VINF_SUCCESS) \
6118 { /* likely */ } \
6119 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6120 { \
6121 Assert((a_pVCpu)->hm.s.Event.fPending); \
6122 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
6123 return VINF_SUCCESS; \
6124 } \
6125 else \
6126 { \
6127 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
6128 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
6129 } \
6130 } while (0)
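/* Typical use at the start of a VMX-instruction VM-exit handler (illustrative, not a prescribed call site):
       HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason); */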
6131
6132/** Macro that decodes a memory operand for an VM-exit caused by an instruction. */
6133# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
6134 do \
6135 { \
6136 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
6137 (a_pGCPtrEffAddr)); \
6138 if (rcStrictTmp == VINF_SUCCESS) \
6139 { /* likely */ } \
6140 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6141 { \
6142 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
6143 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
6144 NOREF(uXcptTmp); \
6145 return VINF_SUCCESS; \
6146 } \
6147 else \
6148 { \
6149 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
6150 return rcStrictTmp; \
6151 } \
6152 } while (0)
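/* Illustrative use in an exit handler; the local variable names here are examples only:
       HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_WRITE, &GCPtrDst); */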
6153#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6154
6155
6156/**
6157 * Advances the guest RIP by the specified number of bytes.
6158 *
6159 * @param pVCpu The cross context virtual CPU structure.
6160 * @param cbInstr Number of bytes to advance the RIP by.
6161 *
6162 * @remarks No-long-jump zone!!!
6163 */
6164DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
6165{
6166 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
6167
6168 /*
6169 * Advance RIP.
6170 *
6171 * The upper 32 bits are only set when in 64-bit mode, so we have to detect
6172 * when the addition causes a "carry" into the upper half and check whether
6173     * we're in 64-bit mode and can go on with it or whether we should zap the top
6174 * half. (Note! The 8086, 80186 and 80286 emulation is done exclusively in
6175 * IEM, so we don't need to bother with pre-386 16-bit wraparound.)
6176 *
6177 * See PC wrap around tests in bs3-cpu-weird-1.
6178 */
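    /* Example: a 2-byte instruction at EIP=0xfffffffe in 32-bit code yields uRipNext=0x100000000;
       the XOR below detects the carry into bit 32 and, as we're not in 64-bit mode, the result
       is truncated back to 0x00000000. */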
6179 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
6180 uint64_t const uRipNext = uRipPrev + cbInstr;
6181 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & RT_BIT_64(32))
6182 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx)))
6183 pVCpu->cpum.GstCtx.rip = uRipNext;
6184 else
6185 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
6186
6187 /*
6188 * Clear RF and interrupt shadowing.
6189 */
6190 if (RT_LIKELY(!(pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF))))
6191 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_INHIBIT_SHADOW;
6192 else
6193 {
6194 if ((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF)) == X86_EFL_TF)
6195 {
6196 /** @todo \#DB - single step. */
6197 }
6198 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
6199 }
6200 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
6201
6202 /* Mark both RIP and RFLAGS as updated. */
6203 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6204}
6205
6206
6207/**
6208 * Advances the guest RIP after reading it from the VMCS.
6209 *
6210 * @returns VBox status code, no informational status codes.
6211 * @param pVCpu The cross context virtual CPU structure.
6212 * @param pVmxTransient The VMX-transient structure.
6213 *
6214 * @remarks No-long-jump zone!!!
6215 */
6216static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6217{
6218 vmxHCReadToTransientSlow<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
6219 /** @todo consider template here after checking callers. */
6220 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
6221 AssertRCReturn(rc, rc);
6222
6223 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
6224 return VINF_SUCCESS;
6225}
6226
6227
6228/**
6229 * Handle a condition that occurred while delivering an event through the guest or
6230 * nested-guest IDT.
6231 *
6232 * @returns Strict VBox status code (i.e. informational status codes too).
6233 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6234 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
6235 *         to continue execution of the guest which will deliver the \#DF.
6236 * @retval VINF_EM_RESET if we detected a triple-fault condition.
6237 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
6238 *
6239 * @param pVCpu The cross context virtual CPU structure.
6240 * @param pVmxTransient The VMX-transient structure.
6241 *
6242 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6243 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
6244 * is due to an EPT violation, PML full or SPP-related event.
6245 *
6246 * @remarks No-long-jump zone!!!
6247 */
6248static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6249{
6250 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6251 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6252 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6253 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6254 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6255 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
6256
6257 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
6258 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6259 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
6260 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
6261 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
6262 {
6263 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
6264 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
6265
6266 /*
6267 * If the event was a software interrupt (generated with INT n) or a software exception
6268 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
6269 * can handle the VM-exit and continue guest execution which will re-execute the
6270 * instruction rather than re-injecting the exception, as that can cause premature
6271 * trips to ring-3 before injection and involve TRPM which currently has no way of
6272 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
6273 * the problem).
6274 */
6275 IEMXCPTRAISE enmRaise;
6276 IEMXCPTRAISEINFO fRaiseInfo;
6277 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6278 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6279 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6280 {
6281 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
6282 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6283 }
6284 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
6285 {
6286 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
6287 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
6288 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
6289
6290 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
6291 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
6292
6293 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
6294
6295 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
6296 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
6297 {
6298 pVmxTransient->fVectoringPF = true;
6299 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6300 }
6301 }
6302 else
6303 {
6304 /*
6305 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
6306 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
6307 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
6308 */
6309 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6310 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6311 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
6312 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6313 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6314 }
6315
6316 /*
6317 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
6318 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
6319 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
6320 * subsequent VM-entry would fail, see @bugref{7445}.
6321 *
6322 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
6323 */
6324 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6325 && enmRaise == IEMXCPTRAISE_PREV_EVENT
6326 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6327 && CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6328 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6329
6330 switch (enmRaise)
6331 {
6332 case IEMXCPTRAISE_CURRENT_XCPT:
6333 {
6334 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
6335 Assert(rcStrict == VINF_SUCCESS);
6336 break;
6337 }
6338
6339 case IEMXCPTRAISE_PREV_EVENT:
6340 {
6341 uint32_t u32ErrCode;
6342 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
6343 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
6344 else
6345 u32ErrCode = 0;
6346
6347 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
6348 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
6349 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
6350 pVCpu->cpum.GstCtx.cr2);
6351
6352 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6353 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
6354 Assert(rcStrict == VINF_SUCCESS);
6355 break;
6356 }
6357
6358 case IEMXCPTRAISE_REEXEC_INSTR:
6359 Assert(rcStrict == VINF_SUCCESS);
6360 break;
6361
6362 case IEMXCPTRAISE_DOUBLE_FAULT:
6363 {
6364 /*
6365 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
6366 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
6367 */
6368 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6369 {
6370 pVmxTransient->fVectoringDoublePF = true;
6371 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6372 pVCpu->cpum.GstCtx.cr2));
6373 rcStrict = VINF_SUCCESS;
6374 }
6375 else
6376 {
6377 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
6378 vmxHCSetPendingXcptDF(pVCpu);
6379 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6380 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6381 rcStrict = VINF_HM_DOUBLE_FAULT;
6382 }
6383 break;
6384 }
6385
6386 case IEMXCPTRAISE_TRIPLE_FAULT:
6387 {
6388 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
6389 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6390 rcStrict = VINF_EM_RESET;
6391 break;
6392 }
6393
6394 case IEMXCPTRAISE_CPU_HANG:
6395 {
6396 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
6397 rcStrict = VERR_EM_GUEST_CPU_HANG;
6398 break;
6399 }
6400
6401 default:
6402 {
6403 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
6404 rcStrict = VERR_VMX_IPE_2;
6405 break;
6406 }
6407 }
6408 }
6409 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6410 && !CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6411 {
6412 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
6413 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
6414 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
6415 {
6416 /*
6417 * Execution of IRET caused a fault when NMI blocking was in effect (i.e we're in
6418 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6419 * that virtual NMIs remain blocked until the IRET execution is completed.
6420 *
6421 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
6422 */
6423 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6424 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6425 }
6426 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6427 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6428 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6429 {
6430 /*
6431 * Execution of IRET caused an EPT violation, page-modification log-full event or
6432 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
6433 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6434 * that virtual NMIs remain blocked until the IRET execution is completed.
6435 *
6436 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
6437 */
6438 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
6439 {
6440 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6441 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6442 }
6443 }
6444 }
6445
6446 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
6447 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
6448 return rcStrict;
6449}
6450
6451
6452#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6453/**
6454 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
6455 * guest attempting to execute a VMX instruction.
6456 *
6457 * @returns Strict VBox status code (i.e. informational status codes too).
6458 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6459 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6460 *
6461 * @param pVCpu The cross context virtual CPU structure.
6462 * @param uExitReason The VM-exit reason.
6463 *
6464 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
6465 * @remarks No-long-jump zone!!!
6466 */
6467static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
6468{
6469 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6470 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6471
6472 /*
6473 * The physical CPU would have already checked the CPU mode/code segment.
6474 * We shall just assert here for paranoia.
6475 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
6476 */
6477 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6478 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6479 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
6480
6481 if (uExitReason == VMX_EXIT_VMXON)
6482 {
6483 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6484
6485 /*
6486 * We check CR4.VMXE because it is required to be always set while in VMX operation
6487 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
6488 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6489 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6490 */
6491 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6492 {
6493 Log4Func(("CR4.VMXE is not set -> #UD\n"));
6494 vmxHCSetPendingXcptUD(pVCpu);
6495 return VINF_HM_PENDING_XCPT;
6496 }
6497 }
6498 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
6499 {
6500 /*
6501 * The guest has not entered VMX operation but attempted to execute a VMX instruction
6502 * (other than VMXON), we need to raise a #UD.
6503 */
6504 Log4Func(("Not in VMX root mode -> #UD\n"));
6505 vmxHCSetPendingXcptUD(pVCpu);
6506 return VINF_HM_PENDING_XCPT;
6507 }
6508
6509 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
6510 return VINF_SUCCESS;
6511}
6512
6513
6514/**
6515 * Decodes the memory operand of an instruction that caused a VM-exit.
6516 *
6517 * The Exit qualification field provides the displacement field for memory
6518 * operand instructions, if any.
6519 *
6520 * @returns Strict VBox status code (i.e. informational status codes too).
6521 * @retval VINF_SUCCESS if the operand was successfully decoded.
6522 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6523 * operand.
6524 * @param pVCpu The cross context virtual CPU structure.
6525 * @param uExitInstrInfo The VM-exit instruction information field.
6526 * @param   GCPtrDisp       The instruction displacement field, if any. For
6527 *                          RIP-relative addressing pass RIP + displacement here.
6528 * @param   enmMemAccess    The memory operand's access type (read or write).
6529 * @param pGCPtrMem Where to store the effective destination memory address.
6530 *
6531 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6532 *          virtual-8086 mode, hence it skips those checks while verifying if the
6533 * segment is valid.
6534 */
6535static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6536 PRTGCPTR pGCPtrMem)
6537{
6538 Assert(pGCPtrMem);
6539 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6540 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6541 | CPUMCTX_EXTRN_CR0);
6542
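    /* Address-size masks and access sizes in bytes, indexed by the VM-exit instruction-info
       address-size field: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit. */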
6543 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6544 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6545 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6546
6547 VMXEXITINSTRINFO ExitInstrInfo;
6548 ExitInstrInfo.u = uExitInstrInfo;
6549 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6550 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6551 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6552 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6553 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6554 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6555 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6556 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6557 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6558
6559 /*
6560 * Validate instruction information.
6561     * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6562 */
6563 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6564 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6565 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6566 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6567 AssertLogRelMsgReturn(fIsMemOperand,
6568 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6569
6570 /*
6571 * Compute the complete effective address.
6572 *
6573 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6574 * See AMD spec. 4.5.2 "Segment Registers".
6575 */
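    /* E.g. for an access like [rbx + rsi*4 + disp], the displacement arrives in GCPtrDisp,
       rbx/rsi come from the base/index register fields (uScale=2 for a *4 scale), the segment
       base is added only outside long mode or for FS/GS, and the sum is masked to the address size. */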
6576 RTGCPTR GCPtrMem = GCPtrDisp;
6577 if (fBaseRegValid)
6578 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6579 if (fIdxRegValid)
6580 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6581
6582 RTGCPTR const GCPtrOff = GCPtrMem;
6583 if ( !fIsLongMode
6584 || iSegReg >= X86_SREG_FS)
6585 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6586 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6587
6588 /*
6589 * Validate effective address.
6590 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6591 */
6592 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6593 Assert(cbAccess > 0);
6594 if (fIsLongMode)
6595 {
6596 if (X86_IS_CANONICAL(GCPtrMem))
6597 {
6598 *pGCPtrMem = GCPtrMem;
6599 return VINF_SUCCESS;
6600 }
6601
6602 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6603 * "Data Limit Checks in 64-bit Mode". */
6604 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6605 vmxHCSetPendingXcptGP(pVCpu, 0);
6606 return VINF_HM_PENDING_XCPT;
6607 }
6608
6609 /*
6610 * This is a watered down version of iemMemApplySegment().
6611 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6612 * and segment CPL/DPL checks are skipped.
6613 */
6614 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6615 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6616 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6617
6618 /* Check if the segment is present and usable. */
6619 if ( pSel->Attr.n.u1Present
6620 && !pSel->Attr.n.u1Unusable)
6621 {
6622 Assert(pSel->Attr.n.u1DescType);
6623 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6624 {
6625 /* Check permissions for the data segment. */
6626 if ( enmMemAccess == VMXMEMACCESS_WRITE
6627 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6628 {
6629 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6630 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6631 return VINF_HM_PENDING_XCPT;
6632 }
6633
6634 /* Check limits if it's a normal data segment. */
6635 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6636 {
6637 if ( GCPtrFirst32 > pSel->u32Limit
6638 || GCPtrLast32 > pSel->u32Limit)
6639 {
6640 Log4Func(("Data segment limit exceeded. "
6641 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6642 GCPtrLast32, pSel->u32Limit));
6643 if (iSegReg == X86_SREG_SS)
6644 vmxHCSetPendingXcptSS(pVCpu, 0);
6645 else
6646 vmxHCSetPendingXcptGP(pVCpu, 0);
6647 return VINF_HM_PENDING_XCPT;
6648 }
6649 }
6650 else
6651 {
6652 /* Check limits if it's an expand-down data segment.
6653 Note! The upper boundary is defined by the B bit, not the G bit! */
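                /* E.g. with u32Limit=0x0fff and the B bit set, only offsets 0x1000 through 0xffffffff are valid. */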
6654 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6655 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6656 {
6657 Log4Func(("Expand-down data segment limit exceeded. "
6658 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6659 GCPtrLast32, pSel->u32Limit));
6660 if (iSegReg == X86_SREG_SS)
6661 vmxHCSetPendingXcptSS(pVCpu, 0);
6662 else
6663 vmxHCSetPendingXcptGP(pVCpu, 0);
6664 return VINF_HM_PENDING_XCPT;
6665 }
6666 }
6667 }
6668 else
6669 {
6670 /* Check permissions for the code segment. */
6671 if ( enmMemAccess == VMXMEMACCESS_WRITE
6672 || ( enmMemAccess == VMXMEMACCESS_READ
6673 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6674 {
6675 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6676 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6677 vmxHCSetPendingXcptGP(pVCpu, 0);
6678 return VINF_HM_PENDING_XCPT;
6679 }
6680
6681 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6682 if ( GCPtrFirst32 > pSel->u32Limit
6683 || GCPtrLast32 > pSel->u32Limit)
6684 {
6685 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6686 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6687 if (iSegReg == X86_SREG_SS)
6688 vmxHCSetPendingXcptSS(pVCpu, 0);
6689 else
6690 vmxHCSetPendingXcptGP(pVCpu, 0);
6691 return VINF_HM_PENDING_XCPT;
6692 }
6693 }
6694 }
6695 else
6696 {
6697 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6698 vmxHCSetPendingXcptGP(pVCpu, 0);
6699 return VINF_HM_PENDING_XCPT;
6700 }
6701
6702 *pGCPtrMem = GCPtrMem;
6703 return VINF_SUCCESS;
6704}
6705#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6706
6707
6708/**
6709 * VM-exit helper for LMSW.
6710 */
6711static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6712{
6713 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6714 AssertRCReturn(rc, rc);
6715
6716 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6717 AssertMsg( rcStrict == VINF_SUCCESS
6718 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6719
6720 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6721 if (rcStrict == VINF_IEM_RAISED_XCPT)
6722 {
6723 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6724 rcStrict = VINF_SUCCESS;
6725 }
6726
6727 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6728 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6729 return rcStrict;
6730}
6731
6732
6733/**
6734 * VM-exit helper for CLTS.
6735 */
6736static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6737{
6738 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6739 AssertRCReturn(rc, rc);
6740
6741 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6742 AssertMsg( rcStrict == VINF_SUCCESS
6743 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6744
6745 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6746 if (rcStrict == VINF_IEM_RAISED_XCPT)
6747 {
6748 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6749 rcStrict = VINF_SUCCESS;
6750 }
6751
6752 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6753 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6754 return rcStrict;
6755}
6756
6757
6758/**
6759 * VM-exit helper for MOV from CRx (CRx read).
6760 */
6761static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6762{
6763 Assert(iCrReg < 16);
6764 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6765
6766 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6767 AssertRCReturn(rc, rc);
6768
6769 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6770 AssertMsg( rcStrict == VINF_SUCCESS
6771 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6772
6773 if (iGReg == X86_GREG_xSP)
6774 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6775 else
6776 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6777#ifdef VBOX_WITH_STATISTICS
6778 switch (iCrReg)
6779 {
6780 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6781 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6782 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6783 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6784 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6785 }
6786#endif
6787 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6788 return rcStrict;
6789}
6790
6791
6792/**
6793 * VM-exit helper for MOV to CRx (CRx write).
6794 */
6795static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6796{
6797 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6798
6799 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6800 AssertMsg( rcStrict == VINF_SUCCESS
6801 || rcStrict == VINF_IEM_RAISED_XCPT
6802 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6803
6804 switch (iCrReg)
6805 {
6806 case 0:
6807 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6808 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6809 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6810 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6811 break;
6812
6813 case 2:
6814 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6815            /* Nothing to do here, CR2 is not part of the VMCS. */
6816 break;
6817
6818 case 3:
6819 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6820 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6821 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6822 break;
6823
6824 case 4:
6825 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6826 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6827#ifndef IN_NEM_DARWIN
6828 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6829 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6830#else
6831 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6832#endif
6833 break;
6834
6835 case 8:
6836 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6837 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6838 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6839 break;
6840
6841 default:
6842 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6843 break;
6844 }
6845
6846 if (rcStrict == VINF_IEM_RAISED_XCPT)
6847 {
6848 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6849 rcStrict = VINF_SUCCESS;
6850 }
6851 return rcStrict;
6852}
6853
6854
6855/**
6856 * VM-exit exception handler for \#PF (Page-fault exception).
6857 *
6858 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6859 */
6860static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6861{
6862 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6863 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
6864
6865#ifndef IN_NEM_DARWIN
6866 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6867 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6868 { /* likely */ }
6869 else
6870#endif
6871 {
6872#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6873 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6874#endif
6875 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6876 if (!pVmxTransient->fVectoringDoublePF)
6877 {
6878 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6879 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6880 }
6881 else
6882 {
6883 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6884 Assert(!pVmxTransient->fIsNestedGuest);
6885 vmxHCSetPendingXcptDF(pVCpu);
6886 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6887 }
6888 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6889 return VINF_SUCCESS;
6890 }
6891
6892 Assert(!pVmxTransient->fIsNestedGuest);
6893
6894    /* If it's a vectoring #PF, have the original event re-injected as PGMTrap0eHandler() is incapable
6895       of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6896 if (pVmxTransient->fVectoringPF)
6897 {
6898 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6899 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6900 }
6901
6902 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6903 AssertRCReturn(rc, rc);
6904
6905 Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pVCpu->cpum.GstCtx.cs.Sel,
6906 pVCpu->cpum.GstCtx.rip, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pVCpu->cpum.GstCtx.cr3));
6907
6908 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6909 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, &pVCpu->cpum.GstCtx, (RTGCPTR)pVmxTransient->uExitQual);
6910
6911 Log4Func(("#PF: rc=%Rrc\n", rc));
6912 if (rc == VINF_SUCCESS)
6913 {
6914 /*
6915 * This is typically a shadow page table sync or a MMIO instruction. But we may have
6916 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6917 */
6918 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6919 TRPMResetTrap(pVCpu);
6920 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6921 return rc;
6922 }
6923
6924 if (rc == VINF_EM_RAW_GUEST_TRAP)
6925 {
6926 if (!pVmxTransient->fVectoringDoublePF)
6927 {
6928 /* It's a guest page fault and needs to be reflected to the guest. */
6929 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6930 TRPMResetTrap(pVCpu);
6931 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6932 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6933 uGstErrorCode, pVmxTransient->uExitQual);
6934 }
6935 else
6936 {
6937 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6938 TRPMResetTrap(pVCpu);
6939 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6940 vmxHCSetPendingXcptDF(pVCpu);
6941 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6942 }
6943
6944 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6945 return VINF_SUCCESS;
6946 }
6947
6948 TRPMResetTrap(pVCpu);
6949 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6950 return rc;
6951}
6952
6953
6954/**
6955 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6956 *
6957 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6958 */
6959static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6960{
6961 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6962 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6963
6964 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR0>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6965 AssertRCReturn(rc, rc);
6966
6967 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6968 {
6969 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
6970 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6971
6972 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6973     *        provides VM-exit instruction length. If this causes problems later,
6974 * disassemble the instruction like it's done on AMD-V. */
6975 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6976 AssertRCReturn(rc2, rc2);
6977 return rc;
6978 }
6979
6980 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6981 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6982 return VINF_SUCCESS;
6983}
6984
6985
6986/**
6987 * VM-exit exception handler for \#BP (Breakpoint exception).
6988 *
6989 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6990 */
6991static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6992{
6993 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6994 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6995
6996 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6997 AssertRCReturn(rc, rc);
6998
6999 VBOXSTRICTRC rcStrict;
7000 if (!pVmxTransient->fIsNestedGuest)
7001 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
7002 else
7003 rcStrict = VINF_EM_RAW_GUEST_TRAP;
7004
7005 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
7006 {
7007 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7008 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7009 rcStrict = VINF_SUCCESS;
7010 }
7011
7012 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
7013 return rcStrict;
7014}
7015
7016
7017/**
7018 * VM-exit exception handler for \#AC (Alignment-check exception).
7019 *
7020 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7021 */
7022static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7023{
7024 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7025
7026 /*
7027     * Detect #ACs caused by the host having enabled split-lock detection.
7028 * Emulate such instructions.
7029 */
7030#define VMX_HC_EXIT_XCPT_AC_INITIAL_REGS (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS)
7031 int rc = vmxHCImportGuestState<VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7032 AssertRCReturn(rc, rc);
7033 /** @todo detect split lock in cpu feature? */
7034 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
7035 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7036 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
7037 || CPUMGetGuestCPL(pVCpu) != 3
7038 /* 3. When the EFLAGS.AC != 0 this can only be a split-lock case. */
7039 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
7040 {
7041 /*
7042 * Check for debug/trace events and import state accordingly.
7043 */
7044 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
7045 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7046 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
7047#ifndef IN_NEM_DARWIN
7048 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
7049#endif
7050 )
7051 {
7052 if (pVM->cCpus == 1)
7053 {
7054#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7055 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK,
7056 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7057#else
7058 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7059 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7060#endif
7061 AssertRCReturn(rc, rc);
7062 }
7063 }
7064 else
7065 {
7066 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7067 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7068 AssertRCReturn(rc, rc);
7069
7070 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
7071
7072 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
7073 {
7074 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
7075 if (rcStrict != VINF_SUCCESS)
7076 return rcStrict;
7077 }
7078 }
7079
7080 /*
7081 * Emulate the instruction.
7082 *
7083 * We have to ignore the LOCK prefix here as we must not retrigger the
7084 * detection on the host. This isn't all that satisfactory, though...
7085 */
7086 if (pVM->cCpus == 1)
7087 {
7088 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
7089 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7090
7091 /** @todo For SMP configs we should do a rendezvous here. */
7092 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
7093 if (rcStrict == VINF_SUCCESS)
7094#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7095 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
7096 HM_CHANGED_GUEST_RIP
7097 | HM_CHANGED_GUEST_RFLAGS
7098 | HM_CHANGED_GUEST_GPRS_MASK
7099 | HM_CHANGED_GUEST_CS
7100 | HM_CHANGED_GUEST_SS);
7101#else
7102 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7103#endif
7104 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7105 {
7106 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7107 rcStrict = VINF_SUCCESS;
7108 }
7109 return rcStrict;
7110 }
7111 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
7112 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7113 return VINF_EM_EMULATE_SPLIT_LOCK;
7114 }
7115
7116 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
7117 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7118 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
7119
7120 /* Re-inject it. We'll detect any nesting before getting here. */
7121 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7122 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7123 return VINF_SUCCESS;
7124}
7125
7126
7127/**
7128 * VM-exit exception handler for \#DB (Debug exception).
7129 *
7130 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7131 */
7132static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7133{
7134 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7135 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
7136
7137 /*
7138 * Get the DR6-like values from the Exit qualification and pass it to DBGF for processing.
7139 */
7140 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
7141
7142 /* See Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
7143 uint64_t const uDR6 = X86_DR6_INIT_VAL
7144 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
7145 | X86_DR6_BD | X86_DR6_BS));
7146
7147 int rc;
7148 if (!pVmxTransient->fIsNestedGuest)
7149 {
7150 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx, uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7151
7152 /*
7153 * Prevents stepping twice over the same instruction when the guest is stepping using
7154 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
7155 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
7156 */
7157 if ( rc == VINF_EM_DBG_STEPPED
7158 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
7159 {
7160 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7161 rc = VINF_EM_RAW_GUEST_TRAP;
7162 }
7163 }
7164 else
7165 rc = VINF_EM_RAW_GUEST_TRAP;
7166 Log6Func(("rc=%Rrc\n", rc));
7167 if (rc == VINF_EM_RAW_GUEST_TRAP)
7168 {
7169 /*
7170 * The exception was for the guest. Update DR6, DR7.GD and
7171 * IA32_DEBUGCTL.LBR before forwarding it.
7172 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
7173 */
7174#ifndef IN_NEM_DARWIN
7175 VMMRZCallRing3Disable(pVCpu);
7176 HM_DISABLE_PREEMPT(pVCpu);
7177
7178 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
7179 pVCpu->cpum.GstCtx.dr[6] |= uDR6;
7180 if (CPUMIsGuestDebugStateActive(pVCpu))
7181 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
7182
7183 HM_RESTORE_PREEMPT();
7184 VMMRZCallRing3Enable(pVCpu);
7185#else
7186 /** @todo */
7187#endif
7188
7189 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7190 AssertRCReturn(rc, rc);
7191
7192 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
7193 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_GD;
7194
7195 /* Paranoia. */
7196 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
7197 pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_RA1_MASK;
7198
7199 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pVCpu->cpum.GstCtx.dr[7]);
7200 AssertRC(rc);
7201
7202 /*
7203 * Raise #DB in the guest.
7204 *
7205 * It is important to reflect exactly what the VM-exit gave us (preserving the
7206 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
7207 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
7208 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
7209 *
7210 * Intel re-documented ICEBP/INT1 in May 2018; it was previously only documented as part of
7211 * the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7212 */
7213 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7214 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7215 return VINF_SUCCESS;
7216 }
7217
7218 /*
7219 * Not a guest trap; it must be a hypervisor-related debug event then.
7220 * Update DR6 in case someone is interested in it.
7221 */
7222 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
7223 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
7224 CPUMSetHyperDR6(pVCpu, uDR6);
7225
7226 return rc;
7227}
7228
7229
7230/**
7231 * Hacks its way around the lovely mesa driver's backdoor accesses.
7232 *
7233 * @sa hmR0SvmHandleMesaDrvGp.
7234 */
7235static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7236{
7237 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
7238 RT_NOREF(pCtx);
7239
7240 /* For now we'll just skip the instruction. */
7241 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7242}
7243
7244
7245/**
7246 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
7247 * backdoor logging without checking what it is running inside.
7248 *
7249 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
7250 * backdoor port and magic numbers loaded in registers.
7251 *
7252 * @returns true if it is, false if it isn't.
7253 * @sa hmR0SvmIsMesaDrvGp.
7254 */
7255DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7256{
7257 /* 0xed: IN eAX,dx */
7258 uint8_t abInstr[1];
7259 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
7260 return false;
7261
7262 /* Check that it is #GP(0). */
7263 if (pVmxTransient->uExitIntErrorCode != 0)
7264 return false;
7265
7266 /* Check magic and port. */
7267 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
7268 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
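    /* Note: 0x564d5868 is the VMware backdoor magic 'VMXh' and 0x5658 ('VX') is the backdoor
       I/O port the Mesa VMware driver uses for its logging. */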
7269 if (pCtx->rax != UINT32_C(0x564d5868))
7270 return false;
7271 if (pCtx->dx != UINT32_C(0x5658))
7272 return false;
7273
7274 /* Flat ring-3 CS. */
7275 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
7276 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
7277 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
7278 if (pCtx->cs.Attr.n.u2Dpl != 3)
7279 return false;
7280 if (pCtx->cs.u64Base != 0)
7281 return false;
7282
7283 /* Check opcode. */
7284 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
7285 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
7286 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
7287 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
7288 if (RT_FAILURE(rc))
7289 return false;
7290 if (abInstr[0] != 0xed)
7291 return false;
7292
7293 return true;
7294}
7295
7296
7297/**
7298 * VM-exit exception handler for \#GP (General-protection exception).
7299 *
7300 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7301 */
7302static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7303{
7304 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7305 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
7306
7307 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7308 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7309#ifndef IN_NEM_DARWIN
7310 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
7311 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
7312 { /* likely */ }
7313 else
7314#endif
7315 {
7316#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7317# ifndef IN_NEM_DARWIN
7318 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7319# else
7320 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7321# endif
7322#endif
7323 /*
7324 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
7325 * executing a nested-guest, reflect #GP to the guest or nested-guest.
7326 */
7327 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7328 AssertRCReturn(rc, rc);
7329 Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
7330 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
7331
7332 if ( pVmxTransient->fIsNestedGuest
7333 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
7334 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
7335 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7336 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7337 else
7338 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
7339 return rc;
7340 }
7341
7342#ifndef IN_NEM_DARWIN
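    /* No unrestricted guest execution here: guest real-mode code runs in V86 mode and #GPs are
       trapped so the offending instruction can be emulated by IEM below. */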
7343 Assert(CPUMIsGuestInRealModeEx(pCtx));
7344 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
7345 Assert(!pVmxTransient->fIsNestedGuest);
7346
7347 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7348 AssertRCReturn(rc, rc);
7349
7350 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7351 if (rcStrict == VINF_SUCCESS)
7352 {
7353 if (!CPUMIsGuestInRealModeEx(pCtx))
7354 {
7355 /*
7356 * The guest is no longer in real-mode, check if we can continue executing the
7357 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
7358 */
7359 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
7360 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
7361 {
7362 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
7363 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7364 }
7365 else
7366 {
7367 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
7368 rcStrict = VINF_EM_RESCHEDULE;
7369 }
7370 }
7371 else
7372 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7373 }
7374 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7375 {
7376 rcStrict = VINF_SUCCESS;
7377 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7378 }
7379 return VBOXSTRICTRC_VAL(rcStrict);
7380#endif
7381}
7382
7383
7384/**
7385 * VM-exit exception handler for \#DE (Divide Error).
7386 *
7387 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7388 */
7389static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7390{
7391 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7392 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
7393
7394 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7395 AssertRCReturn(rc, rc);
7396
7397 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
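    /* If the Guest Compatibility Manager traps #DE (e.g. for division-error workarounds in certain
       legacy guests), give it a chance to fix up the guest context before considering injection. */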
7398 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
7399 {
7400 uint8_t cbInstr = 0;
7401 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
7402 if (rc2 == VINF_SUCCESS)
7403 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
7404 else if (rc2 == VERR_NOT_FOUND)
7405 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
7406 else
7407 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
7408 }
7409 else
7410 rcStrict = VINF_SUCCESS; /* Do nothing. */
7411
7412 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
7413 if (RT_FAILURE(rcStrict))
7414 {
7415 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7416 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7417 rcStrict = VINF_SUCCESS;
7418 }
7419
7420 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
7421 return VBOXSTRICTRC_VAL(rcStrict);
7422}
7423
7424
7425/**
7426 * VM-exit exception handler wrapper for all other exceptions that are not handled
7427 * by a specific handler.
7428 *
7429 * This simply re-injects the exception back into the VM without any special
7430 * processing.
7431 *
7432 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7433 */
7434static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7435{
7436 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7437
7438#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7439# ifndef IN_NEM_DARWIN
7440 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7441 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
7442 ("uVector=%#x u32XcptBitmap=%#X32\n",
7443 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
7444 NOREF(pVmcsInfo);
7445# endif
7446#endif
7447
7448 /*
7449 * Re-inject the exception into the guest. This cannot be a double-fault condition which
7450 * would have been handled while checking exits due to event delivery.
7451 */
7452 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7453
7454#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7455 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7456 AssertRCReturn(rc, rc);
7457 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7458#endif
7459
7460#ifdef VBOX_WITH_STATISTICS
7461 switch (uVector)
7462 {
7463 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
7464 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
7465 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
7466 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7467 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
7468 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
7469 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNM); break;
7470 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
7471 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
7472 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
7473 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
7474 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
7475 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
7476 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
7477 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
7478 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
7479 default:
7480 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
7481 break;
7482 }
7483#endif
7484
7485 /* We should never call this function for a page-fault; we'd need to pass on the fault address below otherwise. */
7486 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
7487 NOREF(uVector);
7488
7489 /* Re-inject the original exception into the guest. */
7490 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7491 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7492 return VINF_SUCCESS;
7493}
7494
7495
7496/**
7497 * VM-exit exception handler for all exceptions (except NMIs!).
7498 *
7499 * @remarks This may be called for both guests and nested-guests. Take care to not
7500 * make assumptions and avoid doing anything that is not relevant when
7501 * executing a nested-guest (e.g., Mesa driver hacks).
7502 */
7503static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7504{
7505 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
7506
7507 /*
7508 * If this VM-exit occurred while delivering an event through the guest IDT, take
7509 * action based on the return code and additional hints (e.g. for page-faults)
7510 * that will be updated in the VMX transient structure.
7511 */
7512 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
7513 if (rcStrict == VINF_SUCCESS)
7514 {
7515 /*
7516 * If an exception caused a VM-exit due to delivery of an event, the original
7517 * event may have to be re-injected into the guest. We shall reinject it and
7518 * continue guest execution. However, page-fault is a complicated case and
7519 * needs additional processing done in vmxHCExitXcptPF().
7520 */
7521 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7522 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7523 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
7524 || uVector == X86_XCPT_PF)
7525 {
7526 switch (uVector)
7527 {
7528 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7529 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7530 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7531 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7532 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7533 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7534 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7535 default:
7536 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7537 }
7538 }
7539 /* else: inject pending event before resuming guest execution. */
7540 }
7541 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7542 {
7543 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7544 rcStrict = VINF_SUCCESS;
7545 }
7546
7547 return rcStrict;
7548}
7549/** @} */
7550
7551
7552/** @name VM-exit handlers.
7553 * @{
7554 */
7555/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7556/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7557/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7558
7559/**
7560 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7561 */
7562HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7563{
7564 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7565 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7566
7567#ifndef IN_NEM_DARWIN
7568 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7569 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7570 return VINF_SUCCESS;
7571 return VINF_EM_RAW_INTERRUPT;
7572#else
7573 return VINF_SUCCESS;
7574#endif
7575}
7576
7577
7578/**
7579 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7580 * VM-exit.
7581 */
7582HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7583{
7584 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7585 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7586
7587 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
7588
7589 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7590 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7591 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7592
7593 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7594 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7595 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7596 NOREF(pVmcsInfo);
7597
7598 VBOXSTRICTRC rcStrict;
7599 switch (uExitIntType)
7600 {
7601#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7602 /*
7603 * Host physical NMIs:
7604 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7605 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7606 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7607 *
7608 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7609 * See Intel spec. 27.5.5 "Updating Non-Register State".
7610 */
7611 case VMX_EXIT_INT_INFO_TYPE_NMI:
7612 {
7613 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7614 break;
7615 }
7616#endif
7617
7618 /*
7619 * Privileged software exceptions (#DB from ICEBP),
7620 * Software exceptions (#BP and #OF),
7621 * Hardware exceptions:
7622 * Process the required exceptions and resume guest execution if possible.
7623 */
7624 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7625 Assert(uVector == X86_XCPT_DB);
7626 RT_FALL_THRU();
7627 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7628 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7629 RT_FALL_THRU();
7630 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7631 {
7632 NOREF(uVector);
7633 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
7634 | HMVMX_READ_EXIT_INSTR_LEN
7635 | HMVMX_READ_IDT_VECTORING_INFO
7636 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
7637 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7638 break;
7639 }
7640
7641 default:
7642 {
7643 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7644 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7645 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7646 break;
7647 }
7648 }
7649
7650 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7651 return rcStrict;
7652}
7653
7654
7655/**
7656 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7657 */
7658HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7659{
7660 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7661
7662 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7663 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7664 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7665
7666 /* Evaluate and deliver pending events and resume guest execution. */
7667 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7668 return VINF_SUCCESS;
7669}
7670
7671
7672/**
7673 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7674 */
7675HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7676{
7677 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7678
7679 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7680 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7681 {
7682 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7683 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7684 }
7685
7686 Assert(!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx));
7687
7688 /*
7689 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7690 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7691 */
7692 uint32_t fIntrState;
7693 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7694 AssertRC(rc);
7695 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7696 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7697 {
7698 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
7699
7700 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7701 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7702 AssertRC(rc);
7703 }
7704
7705 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7706 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7707
7708 /* Evaluate and deliver pending events and resume guest execution. */
7709 return VINF_SUCCESS;
7710}
7711
7712
7713/**
7714 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7715 */
7716HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7717{
7718 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
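    /* Nothing to emulate for WBINVD; just skip the instruction and continue. */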
7719 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7720}
7721
7722
7723/**
7724 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7725 */
7726HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7727{
7728 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7729 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7730}
7731
7732
7733/**
7734 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7735 */
7736HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7737{
7738 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7739
7740 /*
7741 * Get the state we need and update the exit history entry.
7742 */
7743 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7744 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7745 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7746 AssertRCReturn(rc, rc);
7747
7748 VBOXSTRICTRC rcStrict;
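    /* Check the exit history: if the guest keeps hitting CPUID at this RIP, EM returns an exit
       record so we can probe and execute a batch of instructions instead of a single CPUID. */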
7749 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7750 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7751 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7752 if (!pExitRec)
7753 {
7754 /*
7755 * Regular CPUID instruction execution.
7756 */
7757 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7758 if (rcStrict == VINF_SUCCESS)
7759 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7760 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7761 {
7762 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7763 rcStrict = VINF_SUCCESS;
7764 }
7765 }
7766 else
7767 {
7768 /*
7769 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7770 */
7771 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7772 IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7773 AssertRCReturn(rc2, rc2);
7774
7775 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7776 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7777
7778 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7779 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7780
7781 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7782 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7783 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7784 }
7785 return rcStrict;
7786}
7787
7788
7789/**
7790 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7791 */
7792HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7793{
7794 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7795
7796 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7797 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7798 AssertRCReturn(rc, rc);
7799
7800 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7801 return VINF_EM_RAW_EMULATE_INSTR;
7802
7803 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7804 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7805}
7806
7807
7808/**
7809 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7810 */
7811HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7812{
7813 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7814
7815 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7816 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7817 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7818 AssertRCReturn(rc, rc);
7819
7820 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7821 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7822 {
7823 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7824 we must reset offsetting on VM-entry. See @bugref{6634}. */
7825 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7826 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7827 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7828 }
7829 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7830 {
7831 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7832 rcStrict = VINF_SUCCESS;
7833 }
7834 return rcStrict;
7835}
7836
7837
7838/**
7839 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7840 */
7841HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7842{
7843 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7844
7845 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7846 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7847 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX>(pVCpu, pVmcsInfo, __FUNCTION__);
7848 AssertRCReturn(rc, rc);
7849
7850 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7851 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7852 {
7853 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7854 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7855 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7856 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7857 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7858 }
7859 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7860 {
7861 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7862 rcStrict = VINF_SUCCESS;
7863 }
7864 return rcStrict;
7865}
7866
7867
7868/**
7869 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7870 */
7871HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7872{
7873 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7874
7875 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7876 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7877 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7878 AssertRCReturn(rc, rc);
7879
7880 VBOXSTRICTRC rcStrict = IEMExecDecodedRdpmc(pVCpu, pVmxTransient->cbExitInstr);
7881 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7882 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7883 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7884 {
7885 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7886 rcStrict = VINF_SUCCESS;
7887 }
7888 return rcStrict;
7889}
7890
7891
7892/**
7893 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7894 */
7895HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7896{
7897 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7898
7899 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7900 if (EMAreHypercallInstructionsEnabled(pVCpu))
7901 {
7902 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7903 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RIP
7904 | CPUMCTX_EXTRN_RFLAGS
7905 | CPUMCTX_EXTRN_CR0
7906 | CPUMCTX_EXTRN_SS
7907 | CPUMCTX_EXTRN_CS
7908 | CPUMCTX_EXTRN_EFER>(pVCpu, pVmcsInfo, __FUNCTION__);
7909 AssertRCReturn(rc, rc);
7910
7911 /* Perform the hypercall. */
7912 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7913 if (rcStrict == VINF_SUCCESS)
7914 {
7915 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7916 AssertRCReturn(rc, rc);
7917 }
7918 else
7919 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7920 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7921 || RT_FAILURE(rcStrict));
7922
7923 /* If the hypercall changes anything other than guest's general-purpose registers,
7924 we would need to reload the guest changed bits here before VM-entry. */
7925 }
7926 else
7927 Log4Func(("Hypercalls not enabled\n"));
7928
7929 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7930 if (RT_FAILURE(rcStrict))
7931 {
7932 vmxHCSetPendingXcptUD(pVCpu);
7933 rcStrict = VINF_SUCCESS;
7934 }
7935
7936 return rcStrict;
7937}
7938
7939
7940/**
7941 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7942 */
7943HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7944{
7945 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7946#ifndef IN_NEM_DARWIN
7947 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7948#endif
7949
7950 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7951 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
7952 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7953 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7954 AssertRCReturn(rc, rc);
7955
7956 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7957
7958 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7959 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7960 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7961 {
7962 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7963 rcStrict = VINF_SUCCESS;
7964 }
7965 else
7966 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7967 VBOXSTRICTRC_VAL(rcStrict)));
7968 return rcStrict;
7969}
7970
7971
7972/**
7973 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7974 */
7975HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7976{
7977 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7978
7979 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7980 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7981 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS>(pVCpu, pVmcsInfo, __FUNCTION__);
7982 AssertRCReturn(rc, rc);
7983
7984 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7985 if (rcStrict == VINF_SUCCESS)
7986 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7987 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7988 {
7989 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7990 rcStrict = VINF_SUCCESS;
7991 }
7992
7993 return rcStrict;
7994}
7995
7996
7997/**
7998 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7999 */
8000HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8001{
8002 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8003
8004 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8005 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8006 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8007 AssertRCReturn(rc, rc);
8008
8009 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
8010 if (RT_SUCCESS(rcStrict))
8011 {
8012 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8013 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
8014 rcStrict = VINF_SUCCESS;
8015 }
8016
8017 return rcStrict;
8018}
8019
8020
8021/**
8022 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
8023 * VM-exit.
8024 */
8025HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8026{
8027 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8028 return VINF_EM_RESET;
8029}
8030
8031
8032/**
8033 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
8034 */
8035HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8036{
8037 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8038
8039 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8040 AssertRCReturn(rc, rc);
8041
8042 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
8043 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
8044 rc = VINF_SUCCESS;
8045 else
8046 rc = VINF_EM_HALT;
8047
8048 if (rc != VINF_SUCCESS)
8049 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
8050 return rc;
8051}
8052
8053
8054#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8055/**
8056 * VM-exit handler for instructions that result in a \#UD exception delivered to
8057 * the guest.
8058 */
8059HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8060{
8061 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8062 vmxHCSetPendingXcptUD(pVCpu);
8063 return VINF_SUCCESS;
8064}
8065#endif
8066
8067
8068/**
8069 * VM-exit handler for expiry of the VMX-preemption timer.
8070 */
8071HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8072{
8073 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8074
8075 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
8076 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8077    Log12(("vmxHCExitPreemptTimer:\n"));
8078
8079 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
8080 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8081 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
8082 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
8083 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
8084}
8085
8086
8087/**
8088 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
8089 */
8090HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8091{
8092 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8093
8094 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8095 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8096 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
8097 AssertRCReturn(rc, rc);
8098
8099 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
8100 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
8101 : HM_CHANGED_RAISED_XCPT_MASK);
8102
8103#ifndef IN_NEM_DARWIN
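    /* XSETBV may have changed the guest's XCR0; re-evaluate whether it differs from the host's
       so the right VM-entry function (with or without XCR0 swapping) gets used. */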
8104 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8105 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
8106 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
8107 {
8108 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
8109 hmR0VmxUpdateStartVmFunction(pVCpu);
8110 }
8111#endif
8112
8113 return rcStrict;
8114}
8115
8116
8117/**
8118 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
8119 */
8120HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8121{
8122 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8123
8124 /** @todo Enable the new code after finding a reliable guest test-case. */
8125#if 1
8126 return VERR_EM_INTERPRETER;
8127#else
8128 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8129 | HMVMX_READ_EXIT_INSTR_INFO
8130 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8131 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8132 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8133 AssertRCReturn(rc, rc);
8134
8135 /* Paranoia. Ensure this has a memory operand. */
8136 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
8137
8138 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
8139 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
8140 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
8141 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
8142
8143 RTGCPTR GCPtrDesc;
8144 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
8145
8146 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
8147 GCPtrDesc, uType);
8148 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8149 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8150 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8151 {
8152 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8153 rcStrict = VINF_SUCCESS;
8154 }
8155 return rcStrict;
8156#endif
8157}
8158
8159
8160/**
8161 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
8162 * VM-exit.
8163 */
8164HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8165{
8166 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8167 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8168 AssertRCReturn(rc, rc);
8169
8170 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
8171 if (RT_FAILURE(rc))
8172 return rc;
8173
8174 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
8175 NOREF(uInvalidReason);
8176
8177#ifdef VBOX_STRICT
8178 uint32_t fIntrState;
8179 uint64_t u64Val;
8180 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_INFO
8181 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8182 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
8183
8184 Log4(("uInvalidReason %u\n", uInvalidReason));
8185 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
8186 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
8187 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
8188
8189 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
8190 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
8191 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
8192 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
8193 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
8194 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
8195 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
8196 Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
8197 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
8198 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
8199 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
8200 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8201# ifndef IN_NEM_DARWIN
8202 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
8203 {
8204 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
8205 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
8206 }
8207
8208 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
8209# endif
8210#endif
8211
8212 return VERR_VMX_INVALID_GUEST_STATE;
8213}
8214
8215/**
8216 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
8217 */
8218HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8219{
8220 /*
8221 * Cumulative notes of all recognized but unexpected VM-exits.
8222 *
8223 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
8224 * nested-paging is used.
8225 *
8226 * 2. Any instruction that causes a VM-exit unconditionally (for e.g. VMXON) must be
8227 * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
8228 * this function (and thereby stop VM execution) for handling such instructions.
8229 *
8230 *
8231 * VMX_EXIT_INIT_SIGNAL:
8232 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
8233 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these
8234 * VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
8235 *
8236 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
8237 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
8238 * See Intel spec. "23.8 Restrictions on VMX operation".
8239 *
8240 * VMX_EXIT_SIPI:
8241 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
8242 * activity state is used. We don't make use of it as our guests don't have direct
8243 * access to the host local APIC.
8244 *
8245 * See Intel spec. 25.3 "Other Causes of VM-exits".
8246 *
8247 * VMX_EXIT_IO_SMI:
8248 * VMX_EXIT_SMI:
8249 * This can only happen if we support dual-monitor treatment of SMI, which can be
8250 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
8251 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
8252 * VMX root mode or receive an SMI. If we get here, something funny is going on.
8253 *
8254 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
8255 * See Intel spec. 25.3 "Other Causes of VM-Exits"
8256 *
8257 * VMX_EXIT_ERR_MSR_LOAD:
8258 * Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
8259 * and typically indicate a bug in the hypervisor code. We thus cannot resume
8260 * execution.
8261 *
8262 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
8263 *
8264 * VMX_EXIT_ERR_MACHINE_CHECK:
8265 * Machine-check exceptions indicate a fatal/unrecoverable hardware condition
8266 * including but not limited to system bus, ECC, parity, cache and TLB errors. An
8267 * abort-class #MC exception is raised. We thus cannot assume a
8268 * reasonable chance of continuing any sort of execution and we bail.
8269 *
8270 * See Intel spec. 15.1 "Machine-check Architecture".
8271 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
8272 *
8273 * VMX_EXIT_PML_FULL:
8274 * VMX_EXIT_VIRTUALIZED_EOI:
8275 * VMX_EXIT_APIC_WRITE:
8276 * We do not currently support any of these features and thus they are all unexpected
8277 * VM-exits.
8278 *
8279 * VMX_EXIT_GDTR_IDTR_ACCESS:
8280 * VMX_EXIT_LDTR_TR_ACCESS:
8281 * VMX_EXIT_RDRAND:
8282 * VMX_EXIT_RSM:
8283 * VMX_EXIT_VMFUNC:
8284 * VMX_EXIT_ENCLS:
8285 * VMX_EXIT_RDSEED:
8286 * VMX_EXIT_XSAVES:
8287 * VMX_EXIT_XRSTORS:
8288 * VMX_EXIT_UMWAIT:
8289 * VMX_EXIT_TPAUSE:
8290 * VMX_EXIT_LOADIWKEY:
8291 * These VM-exits are -not- caused unconditionally by execution of the corresponding
8292 * instruction. Any VM-exit for these instructions indicates a hardware problem,
8293 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
8294 *
8295 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
8296 */
8297 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8298 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
8299 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8300}
8301
8302
8303/**
8304 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
8305 */
8306HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8307{
8308 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8309
8310 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8311
8312 /** @todo Optimize this: We currently drag in the whole MSR state
8313 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8314 * MSRs required. That would require changes to IEM and possibly CPUM too.
8315 * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */
8316 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8317 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8318 int rc;
8319 switch (idMsr)
8320 {
8321 default:
8322 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8323 __FUNCTION__);
8324 AssertRCReturn(rc, rc);
8325 break;
8326 case MSR_K8_FS_BASE:
8327 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8328 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8329 AssertRCReturn(rc, rc);
8330 break;
8331 case MSR_K8_GS_BASE:
8332 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8333 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8334 AssertRCReturn(rc, rc);
8335 break;
8336 }
8337
8338 Log4Func(("ecx=%#RX32\n", idMsr));
8339
8340#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8341 Assert(!pVmxTransient->fIsNestedGuest);
8342 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
8343 {
8344 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
8345 && idMsr != MSR_K6_EFER)
8346 {
8347 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
8348 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8349 }
8350 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8351 {
8352 Assert(pVmcsInfo->pvMsrBitmap);
8353 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8354 if (fMsrpm & VMXMSRPM_ALLOW_RD)
8355 {
8356 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
8357 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8358 }
8359 }
8360 }
8361#endif
8362
8363 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
8364 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
8365 if (rcStrict == VINF_SUCCESS)
8366 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8367 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8368 {
8369 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8370 rcStrict = VINF_SUCCESS;
8371 }
8372 else
8373 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
8374 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8375
8376 return rcStrict;
8377}
8378
8379
8380/**
8381 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
8382 */
8383HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8384{
8385 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8386
8387 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8388
8389 /*
8390 * The FS and GS base MSRs are not part of the above all-MSRs mask.
8391 * Although we don't need to fetch the base as it will be overwritten shortly, while
8392 * loading guest-state we would also load the entire segment register including limit
8393 * and attributes and thus we need to load them here.
8394 */
8395 /** @todo Optimize this: We currently drag in the whole MSR state
8396 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8397 * MSRs required. That would require changes to IEM and possibly CPUM too.
8398 * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */
8399 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8400 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8401 int rc;
8402 switch (idMsr)
8403 {
8404 default:
8405 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8406 __FUNCTION__);
8407 AssertRCReturn(rc, rc);
8408 break;
8409
8410 case MSR_K8_FS_BASE:
8411 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8412 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8413 AssertRCReturn(rc, rc);
8414 break;
8415 case MSR_K8_GS_BASE:
8416 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8417 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8418 AssertRCReturn(rc, rc);
8419 break;
8420 }
8421 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
8422
8423 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
8424 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
8425
8426 if (rcStrict == VINF_SUCCESS)
8427 {
8428 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8429
8430 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
8431 if ( idMsr == MSR_IA32_APICBASE
8432 || ( idMsr >= MSR_IA32_X2APIC_START
8433 && idMsr <= MSR_IA32_X2APIC_END))
8434 {
8435 /*
8436 * We've already saved the APIC related guest-state (TPR) in post-run phase.
8437 * When full APIC register virtualization is implemented we'll have to make
8438 * sure APIC state is saved from the VMCS before IEM changes it.
8439 */
8440 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
8441 }
8442 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
8443 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8444 else if (idMsr == MSR_K6_EFER)
8445 {
8446 /*
8447 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
8448 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
8449 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
8450 */
8451 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
8452 }
8453
8454 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
8455 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
8456 {
8457 switch (idMsr)
8458 {
8459 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
8460 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
8461 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
8462 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
8463 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
8464 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
8465 default:
8466 {
8467#ifndef IN_NEM_DARWIN
8468 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8469 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
8470 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8471 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
8472#else
8473 AssertMsgFailed(("TODO\n"));
8474#endif
8475 break;
8476 }
8477 }
8478 }
8479#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8480 else
8481 {
8482            /* Paranoia. Validate that MSRs given write pass-through in the MSR bitmap are not, in fact, being intercepted here. */
8483 switch (idMsr)
8484 {
8485 case MSR_IA32_SYSENTER_CS:
8486 case MSR_IA32_SYSENTER_EIP:
8487 case MSR_IA32_SYSENTER_ESP:
8488 case MSR_K8_FS_BASE:
8489 case MSR_K8_GS_BASE:
8490 {
8491 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
8492 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8493 }
8494
8495                /* Writes to MSRs in the auto-load/store area or to lazily swapped MSRs shouldn't cause VM-exits when MSR bitmaps are used. */
8496 default:
8497 {
8498 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8499 {
8500 /* EFER MSR writes are always intercepted. */
8501 if (idMsr != MSR_K6_EFER)
8502 {
8503 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
8504 idMsr));
8505 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8506 }
8507 }
8508
8509 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8510 {
8511 Assert(pVmcsInfo->pvMsrBitmap);
8512 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8513 if (fMsrpm & VMXMSRPM_ALLOW_WR)
8514 {
8515 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
8516 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8517 }
8518 }
8519 break;
8520 }
8521 }
8522 }
8523#endif /* VBOX_STRICT */
8524 }
8525 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8526 {
8527 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8528 rcStrict = VINF_SUCCESS;
8529 }
8530 else
8531 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
8532 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8533
8534 return rcStrict;
8535}
8536
8537
8538/**
8539 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8540 */
8541HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8542{
8543 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8544
8545 /** @todo The guest has likely hit a contended spinlock. We might want to
8546     * poke or schedule a different guest VCPU. */
8547 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8548 if (RT_SUCCESS(rc))
8549 return VINF_EM_RAW_INTERRUPT;
8550
8551 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8552 return rc;
8553}
8554
8555
8556/**
8557 * VM-exit handler for when the TPR value is lowered below the specified
8558 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8559 */
8560HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8561{
8562 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8563 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8564
8565 /*
8566 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8567 * We'll re-evaluate pending interrupts and inject them before the next VM
8568 * entry so we can just continue execution here.
8569 */
8570 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8571 return VINF_SUCCESS;
8572}
8573
8574
8575/**
8576 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8577 * VM-exit.
8578 *
8579 * @retval VINF_SUCCESS when guest execution can continue.
8580 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8581 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8582 * incompatible guest state for VMX execution (real-on-v86 case).
8583 */
8584HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8585{
8586 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8587 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8588
8589 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8590 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8591 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8592
8593 VBOXSTRICTRC rcStrict;
8594 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8595 uint64_t const uExitQual = pVmxTransient->uExitQual;
8596 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
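    /* Informal summary of the exit qualification layout for control-register accesses
       (see the Intel spec. table "Exit Qualification for Control-Register Accesses"):
         bits 3:0   - control register number (0 for CLTS and LMSW),
         bits 5:4   - access type: 0=MOV to CRx, 1=MOV from CRx, 2=CLTS, 3=LMSW,
         bit  6     - LMSW operand type: 0=register, 1=memory,
         bits 11:8  - general-purpose register used by MOV CRx,
         bits 31:16 - LMSW source data.
       The VMX_EXIT_QUAL_CRX_XXX getters used below extract these fields. */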
8597 switch (uAccessType)
8598 {
8599 /*
8600 * MOV to CRx.
8601 */
8602 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8603 {
8604 /*
8605 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8606             * changes certain bits in CR0 or CR4 (and not just CR3). We are currently fine
8607 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8608 * PAE PDPTEs as well.
8609 */
8610 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8611 AssertRCReturn(rc, rc);
8612
8613 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8614#ifndef IN_NEM_DARWIN
8615 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8616#endif
8617 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8618 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8619
8620 /*
8621             * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8622 * - When nested paging isn't used.
8623 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8624 * - We are executing in the VM debug loop.
8625 */
8626#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8627# ifndef IN_NEM_DARWIN
8628 Assert( iCrReg != 3
8629 || !VM_IS_VMX_NESTED_PAGING(pVM)
8630 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8631 || pVCpu->hmr0.s.fUsingDebugLoop);
8632# else
8633 Assert( iCrReg != 3
8634 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8635# endif
8636#endif
8637
8638 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8639 Assert( iCrReg != 8
8640 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8641
8642 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8643 AssertMsg( rcStrict == VINF_SUCCESS
8644 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8645
8646#ifndef IN_NEM_DARWIN
8647 /*
8648 * This is a kludge for handling switches back to real mode when we try to use
8649 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8650 * deal with special selector values, so we have to return to ring-3 and run
8651 * there till the selector values are V86 mode compatible.
8652 *
8653 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8654 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8655 * this function.
8656 */
8657 if ( iCrReg == 0
8658 && rcStrict == VINF_SUCCESS
8659 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8660 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8661 && (uOldCr0 & X86_CR0_PE)
8662 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8663 {
8664 /** @todo Check selectors rather than returning all the time. */
8665 Assert(!pVmxTransient->fIsNestedGuest);
8666 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8667 rcStrict = VINF_EM_RESCHEDULE_REM;
8668 }
8669#endif
8670
8671 break;
8672 }
8673
8674 /*
8675 * MOV from CRx.
8676 */
8677 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8678 {
8679 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8680 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8681
8682 /*
8683             * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8684 * - When nested paging isn't used.
8685 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8686 * - We are executing in the VM debug loop.
8687 */
8688#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8689# ifndef IN_NEM_DARWIN
8690 Assert( iCrReg != 3
8691 || !VM_IS_VMX_NESTED_PAGING(pVM)
8692 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8693 || pVCpu->hmr0.s.fLeaveDone);
8694# else
8695 Assert( iCrReg != 3
8696 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8697# endif
8698#endif
8699
8700 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8701 Assert( iCrReg != 8
8702 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8703
8704 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8705 break;
8706 }
8707
8708 /*
8709 * CLTS (Clear Task-Switch Flag in CR0).
8710 */
8711 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8712 {
8713 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8714 break;
8715 }
8716
8717 /*
8718 * LMSW (Load Machine-Status Word into CR0).
8719 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8720 */
8721 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8722 {
8723 RTGCPTR GCPtrEffDst;
8724 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8725 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8726 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8727 if (fMemOperand)
8728 {
8729 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
8730 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8731 }
8732 else
8733 GCPtrEffDst = NIL_RTGCPTR;
8734 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8735 break;
8736 }
8737
8738 default:
8739 {
8740 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8741 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8742 }
8743 }
8744
8745 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8746 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8747 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8748
8749 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8750 NOREF(pVM);
8751 return rcStrict;
8752}
8753
8754
8755/**
8756 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8757 * VM-exit.
8758 */
8759HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8760{
8761 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8762 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8763
8764 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8765 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8766 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8767 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8768#define VMX_HC_EXIT_IO_INSTR_INITIAL_REGS (IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER)
8769    /* The EFER MSR is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8770 int rc = vmxHCImportGuestState<VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8771 AssertRCReturn(rc, rc);
8772
8773    /* See Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
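    /* Informal summary of that table:
         bits 2:0   - size of access (0=1 byte, 1=2 bytes, 3=4 bytes),
         bit  3     - direction (0=OUT, 1=IN),
         bit  4     - string instruction (INS/OUTS),
         bit  5     - REP prefixed,
         bit  6     - operand encoding (0=DX, 1=immediate),
         bits 31:16 - port number.
       The VMX_EXIT_QUAL_IO_XXX getters below extract these fields. */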
8774 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8775 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8776 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8777 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8778 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8779 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8780 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8781
8782 /*
8783 * Update exit history to see if this exit can be optimized.
8784 */
8785 VBOXSTRICTRC rcStrict;
8786 PCEMEXITREC pExitRec = NULL;
8787 if ( !fGstStepping
8788 && !fDbgStepping)
8789 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8790 !fIOString
8791 ? !fIOWrite
8792 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8793 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8794 : !fIOWrite
8795 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8796 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8797 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
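    /* EMHistoryUpdateFlagsAndTypeAndPC records the exit type and PC and only hands back
       an exit record once the same exit keeps recurring at the same PC, in which case we
       go down the EMHistoryExec path further below so EM can probe and optimize the
       access.  When single-stepping (guest TF or debugger) we skip this, presumably to
       keep the stepping granularity at one instruction. */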
8798 if (!pExitRec)
8799 {
8800 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8801 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
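        /* Index 2 of both tables is unused because an I/O access-size encoding of 2 is
           invalid, cf. the AssertReturn on uIOSize above; valid encodings map to 1, 2
           and 4 byte accesses. */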
8802
8803 uint32_t const cbValue = s_aIOSizes[uIOSize];
8804 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8805 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8806 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8807 if (fIOString)
8808 {
8809 /*
8810 * INS/OUTS - I/O String instruction.
8811 *
8812 * Use instruction-information if available, otherwise fall back on
8813 * interpreting the instruction.
8814 */
8815 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8816 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8817 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8818 if (fInsOutsInfo)
8819 {
8820 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
8821 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8822 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8823 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8824 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8825 if (fIOWrite)
8826 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8827 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8828 else
8829 {
8830 /*
8831 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8832                 * Hence the "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8833 * See Intel Instruction spec. for "INS".
8834 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8835 */
8836 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8837 }
8838 }
8839 else
8840 rcStrict = IEMExecOne(pVCpu);
8841
8842 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8843 fUpdateRipAlready = true;
8844 }
8845 else
8846 {
8847 /*
8848 * IN/OUT - I/O instruction.
8849 */
8850 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8851 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8852 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8853 if (fIOWrite)
8854 {
8855 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8856 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8857#ifndef IN_NEM_DARWIN
8858 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8859 && !pCtx->eflags.Bits.u1TF)
8860 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8861#endif
8862 }
8863 else
8864 {
8865 uint32_t u32Result = 0;
8866 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8867 if (IOM_SUCCESS(rcStrict))
8868 {
8869 /* Save result of I/O IN instr. in AL/AX/EAX. */
8870 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8871 }
8872#ifndef IN_NEM_DARWIN
8873 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8874 && !pCtx->eflags.Bits.u1TF)
8875 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8876#endif
8877 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8878 }
8879 }
8880
8881 if (IOM_SUCCESS(rcStrict))
8882 {
8883 if (!fUpdateRipAlready)
8884 {
8885 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8886 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8887 }
8888
8889 /*
8890             * INS/OUTS with a REP prefix updates RFLAGS; this could be observed as a triple-fault
8891             * guru meditation while booting a Fedora 17 64-bit guest.
8892 *
8893 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8894 */
8895 if (fIOString)
8896 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8897
8898 /*
8899 * If any I/O breakpoints are armed, we need to check if one triggered
8900 * and take appropriate action.
8901 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8902 */
8903#if 1
8904 AssertCompile(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7);
8905#else
8906 AssertCompile(!(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7));
8907 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo);
8908 AssertRCReturn(rc, rc);
8909#endif
8910
8911 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8912 * execution engines about whether hyper BPs and such are pending. */
8913 uint32_t const uDr7 = pCtx->dr[7];
8914 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8915 && X86_DR7_ANY_RW_IO(uDr7)
8916 && (pCtx->cr4 & X86_CR4_DE))
8917 || DBGFBpIsHwIoArmed(pVM)))
8918 {
8919 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8920
8921#ifndef IN_NEM_DARWIN
8922 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8923 VMMRZCallRing3Disable(pVCpu);
8924 HM_DISABLE_PREEMPT(pVCpu);
8925
8926 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8927
8928 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8929 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8930 {
8931 /* Raise #DB. */
8932 if (fIsGuestDbgActive)
8933 ASMSetDR6(pCtx->dr[6]);
8934 if (pCtx->dr[7] != uDr7)
8935 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8936
8937 vmxHCSetPendingXcptDB(pVCpu);
8938 }
8939 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8940 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8941 else if ( rcStrict2 != VINF_SUCCESS
8942 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8943 rcStrict = rcStrict2;
8944 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8945
8946 HM_RESTORE_PREEMPT();
8947 VMMRZCallRing3Enable(pVCpu);
8948#else
8949 /** @todo */
8950#endif
8951 }
8952 }
8953
8954#ifdef VBOX_STRICT
8955 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8956 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8957 Assert(!fIOWrite);
8958 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8959 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8960 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8961 Assert(fIOWrite);
8962 else
8963 {
8964# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8965 * statuses, that the VMM device and some others may return. See
8966 * IOM_SUCCESS() for guidance. */
8967 AssertMsg( RT_FAILURE(rcStrict)
8968 || rcStrict == VINF_SUCCESS
8969 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8970 || rcStrict == VINF_EM_DBG_BREAKPOINT
8971 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8972 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8973# endif
8974 }
8975#endif
8976 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8977 }
8978 else
8979 {
8980 /*
8981 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8982 */
8983 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
8984 VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8985 AssertRCReturn(rc2, rc2);
8986 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8987 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8988 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8989 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8990 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8991 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8992
8993 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8994 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8995
8996 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8997 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8998 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8999 }
9000 return rcStrict;
9001}
9002
9003
9004/**
9005 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
9006 * VM-exit.
9007 */
9008HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9009{
9010 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9011
9012    /* Check if this task-switch occurred while delivering an event through the guest IDT. */
9013 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9014 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
9015 {
9016 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_INFO>(pVCpu, pVmxTransient);
9017 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9018 {
9019 uint32_t uErrCode;
9020 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
9021 {
9022 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9023 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
9024 }
9025 else
9026 uErrCode = 0;
9027
9028 RTGCUINTPTR GCPtrFaultAddress;
9029 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
9030 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
9031 else
9032 GCPtrFaultAddress = 0;
9033
9034 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9035
9036 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
9037 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
9038
9039 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
9040 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
9041 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9042 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9043 }
9044 }
9045
9046 /* Fall back to the interpreter to emulate the task-switch. */
9047 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9048 return VERR_EM_INTERPRETER;
9049}
9050
9051
9052/**
9053 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
9054 */
9055HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9056{
9057 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
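    /* The monitor trap flag causes a VM-exit after (at most) one guest instruction has
       executed.  It is normally armed by our single-stepping/debug-loop code, so simply
       disarm it again and report the completed step. */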
9058
9059 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9060 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
9061 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9062 AssertRC(rc);
9063 return VINF_EM_DBG_STEPPED;
9064}
9065
9066
9067/**
9068 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
9069 */
9070HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9071{
9072 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9073 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
9074
9075 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9076 | HMVMX_READ_EXIT_INSTR_LEN
9077 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9078 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9079 | HMVMX_READ_IDT_VECTORING_INFO
9080 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9081
9082 /*
9083 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9084 */
9085 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9086 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9087 {
9088        /* For some crazy guests, event delivery can cause an APIC-access VM-exit; go to instruction emulation in that case. */
9089 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
9090 {
9091 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9092 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9093 }
9094 }
9095 else
9096 {
9097 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9098 return rcStrict;
9099 }
9100
9101    /* IOMR0MmioPhysHandler() below may call into IEM, so save the necessary state. */
9102 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9103 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9104 AssertRCReturn(rc, rc);
9105
9106    /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
9107 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
9108 switch (uAccessType)
9109 {
9110#ifndef IN_NEM_DARWIN
9111 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
9112 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
9113 {
9114 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
9115 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
9116 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
9117
9118 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
9119 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
9120 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
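            /* The exit qualification only supplies the page offset of the access; the
               full guest-physical address is reconstructed from the guest's APIC-base
               MSR (page-aligned above) so IOM can route the access to the APIC MMIO
               handler. */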
9121 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
9122 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
9123
9124 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
9125 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
9126 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9127 if ( rcStrict == VINF_SUCCESS
9128 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9129 || rcStrict == VERR_PAGE_NOT_PRESENT)
9130 {
9131 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9132 | HM_CHANGED_GUEST_APIC_TPR);
9133 rcStrict = VINF_SUCCESS;
9134 }
9135 break;
9136 }
9137#else
9138 /** @todo */
9139#endif
9140
9141 default:
9142 {
9143 Log4Func(("uAccessType=%#x\n", uAccessType));
9144 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
9145 break;
9146 }
9147 }
9148
9149 if (rcStrict != VINF_SUCCESS)
9150 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
9151 return rcStrict;
9152}
9153
9154
9155/**
9156 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
9157 * VM-exit.
9158 */
9159HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9160{
9161 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9162 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9163
9164 /*
9165 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
9166 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
9167 * must emulate the MOV DRx access.
9168 */
9169 if (!pVmxTransient->fIsNestedGuest)
9170 {
9171 /* We should -not- get this VM-exit if the guest's debug registers were active. */
9172 if (pVmxTransient->fWasGuestDebugStateActive)
9173 {
9174 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
9175 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
9176 }
9177
9178 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
9179 && !pVmxTransient->fWasHyperDebugStateActive)
9180 {
9181 Assert(!DBGFIsStepping(pVCpu));
9182 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
9183
9184 /* Don't intercept MOV DRx any more. */
9185 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
9186 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9187 AssertRC(rc);
9188
9189#ifndef IN_NEM_DARWIN
9190 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
9191 VMMRZCallRing3Disable(pVCpu);
9192 HM_DISABLE_PREEMPT(pVCpu);
9193
9194 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
9195 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
9196 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9197
9198 HM_RESTORE_PREEMPT();
9199 VMMRZCallRing3Enable(pVCpu);
9200#else
9201 CPUMR3NemActivateGuestDebugState(pVCpu);
9202 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9203 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
9204#endif
9205
9206#ifdef VBOX_WITH_STATISTICS
9207 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9208 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9209 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9210 else
9211 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9212#endif
9213 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
9214 return VINF_SUCCESS;
9215 }
9216 }
9217
9218 /*
9219 * Import state. We must have DR7 loaded here as it's always consulted,
9220 * both for reading and writing. The other debug registers are never
9221 * exported as such.
9222 */
9223 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9224 int rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
9225 | CPUMCTX_EXTRN_GPRS_MASK
9226 | CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo, __FUNCTION__);
9227 AssertRCReturn(rc, rc);
9228 Log4Func(("cs:rip=%#04x:%08RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9229
9230 uint8_t const iGReg = VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual);
9231 uint8_t const iDrReg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
9232
9233 VBOXSTRICTRC rcStrict;
9234 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9235 {
9236 /*
9237 * Write DRx register.
9238 */
9239 rcStrict = IEMExecDecodedMovDRxWrite(pVCpu, pVmxTransient->cbExitInstr, iDrReg, iGReg);
9240 AssertMsg( rcStrict == VINF_SUCCESS
9241 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9242
9243 if (rcStrict == VINF_SUCCESS)
9244 /** @todo r=bird: Not sure why we always flag DR7 as modified here, but I've
9245 * kept it for now to avoid breaking something non-obvious. */
9246 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9247 | HM_CHANGED_GUEST_DR7);
9248 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9249 {
9250 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9251 rcStrict = VINF_SUCCESS;
9252 }
9253
9254 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9255 }
9256 else
9257 {
9258 /*
9259 * Read DRx register into a general purpose register.
9260 */
9261 rcStrict = IEMExecDecodedMovDRxRead(pVCpu, pVmxTransient->cbExitInstr, iGReg, iDrReg);
9262 AssertMsg( rcStrict == VINF_SUCCESS
9263 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9264
9265 if (rcStrict == VINF_SUCCESS)
9266 {
9267 if (iGReg == X86_GREG_xSP)
9268 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9269 | HM_CHANGED_GUEST_RSP);
9270 else
9271 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9272 }
9273 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9274 {
9275 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9276 rcStrict = VINF_SUCCESS;
9277 }
9278
9279 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9280 }
9281
9282 return rcStrict;
9283}
9284
9285
9286/**
9287 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
9288 * Conditional VM-exit.
9289 */
9290HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9291{
9292 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9293
9294#ifndef IN_NEM_DARWIN
9295 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9296
9297 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_LEN
9298 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9299 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9300 | HMVMX_READ_IDT_VECTORING_INFO
9301 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9302 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9303
9304 /*
9305 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9306 */
9307 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9308 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9309 {
9310 /*
9311 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
9312 * instruction emulation to inject the original event. Otherwise, injecting the original event
9313 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
9314 */
9315 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9316 { /* likely */ }
9317 else
9318 {
9319 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9320# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9321 /** @todo NSTVMX: Think about how this should be handled. */
9322 if (pVmxTransient->fIsNestedGuest)
9323 return VERR_VMX_IPE_3;
9324# endif
9325 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9326 }
9327 }
9328 else
9329 {
9330 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9331 return rcStrict;
9332 }
9333
9334 /*
9335 * Get sufficient state and update the exit history entry.
9336 */
9337 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9338 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9339 AssertRCReturn(rc, rc);
9340
9341 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9342 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
9343 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
9344 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
9345 if (!pExitRec)
9346 {
9347 /*
9348 * If we succeed, resume guest execution.
9349 * If we fail in interpreting the instruction because we couldn't get the guest physical address
9350 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
9351 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
9352 * weird case. See @bugref{6043}.
9353 */
9354 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9355/** @todo bird: We can probably just go straight to IOM here and assume that
9356 * it's MMIO, then fall back on PGM if that hunch didn't work out so
9357 * well. However, we need to address the aliasing workarounds that
9358 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
9359 *
9360 * Might also be interesting to see if we can get this done more or
9361 * less locklessly inside IOM. Need to consider the lookup table
9362 * updating and usage a bit more carefully first (or do all updates via
9363 * rendezvous) */
9364 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, &pVCpu->cpum.GstCtx, GCPhys, UINT32_MAX);
9365 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
9366 if ( rcStrict == VINF_SUCCESS
9367 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9368 || rcStrict == VERR_PAGE_NOT_PRESENT)
9369 {
9370 /* Successfully handled MMIO operation. */
9371 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9372 | HM_CHANGED_GUEST_APIC_TPR);
9373 rcStrict = VINF_SUCCESS;
9374 }
9375 }
9376 else
9377 {
9378 /*
9379 * Frequent exit or something needing probing. Call EMHistoryExec.
9380 */
9381 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
9382 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
9383
9384 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9385 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9386
9387 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9388 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9389 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9390 }
9391 return rcStrict;
9392#else
9393 AssertFailed();
9394 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
9395#endif
9396}
9397
9398
9399/**
9400 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
9401 * VM-exit.
9402 */
9403HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9404{
9405 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9406#ifndef IN_NEM_DARWIN
9407 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9408
9409 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9410 | HMVMX_READ_EXIT_INSTR_LEN
9411 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9412 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9413 | HMVMX_READ_IDT_VECTORING_INFO
9414 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9415 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9416
9417 /*
9418 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9419 */
9420 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9421 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9422 {
9423 /*
9424 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
9425 * we shall resolve the nested #PF and re-inject the original event.
9426 */
9427 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9428 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
9429 }
9430 else
9431 {
9432 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9433 return rcStrict;
9434 }
9435
9436 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9437 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9438 AssertRCReturn(rc, rc);
9439
9440 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9441 uint64_t const uExitQual = pVmxTransient->uExitQual;
9442 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
9443
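    /* Fold the EPT-violation exit qualification into a #PF-style error code for PGM:
       instruction fetches map to X86_TRAP_PF_ID, write accesses to X86_TRAP_PF_RW, and
       if the EPT entry granted any of read/write/execute the translation was present,
       hence X86_TRAP_PF_P. */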
9444 RTGCUINT uErrorCode = 0;
9445 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
9446 uErrorCode |= X86_TRAP_PF_ID;
9447 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9448 uErrorCode |= X86_TRAP_PF_RW;
9449 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
9450 uErrorCode |= X86_TRAP_PF_P;
9451
9452 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9453 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
9454
9455 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9456
9457 /*
9458 * Handle the pagefault trap for the nested shadow table.
9459 */
9460 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
9461 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, pCtx, GCPhys);
9462 TRPMResetTrap(pVCpu);
9463
9464 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
9465 if ( rcStrict == VINF_SUCCESS
9466 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9467 || rcStrict == VERR_PAGE_NOT_PRESENT)
9468 {
9469 /* Successfully synced our nested page tables. */
9470 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
9471 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
9472 return VINF_SUCCESS;
9473 }
9474 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9475 return rcStrict;
9476
9477#else /* IN_NEM_DARWIN */
9478 PVM pVM = pVCpu->CTX_SUFF(pVM);
9479 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
9480 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9481 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9482 vmxHCImportGuestRip(pVCpu);
9483 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
9484
9485 /*
9486 * Ask PGM for information about the given GCPhys. We need to check if we're
9487 * out of sync first.
9488 */
9489 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE),
9490 false,
9491 false };
9492 PGMPHYSNEMPAGEINFO Info;
9493 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
9494 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
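    /* If the page-info checker reports that the NEM mapping already grants the required
       access and the callback says the access can simply be retried (State.fCanResume),
       we restart the guest instruction; otherwise the access is emulated below (access
       handler, MMIO or other special memory). */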
9495 if (RT_SUCCESS(rc))
9496 {
9497 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9498 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
9499 {
9500 if (State.fCanResume)
9501 {
9502 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
9503 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9504 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9505 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9506 State.fDidSomething ? "" : " no-change"));
9507 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
9508 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9509 return VINF_SUCCESS;
9510 }
9511 }
9512
9513 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
9514 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9515 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9516 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9517 State.fDidSomething ? "" : " no-change"));
9518 }
9519 else
9520 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
9521 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9522 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
9523
9524 /*
9525 * Emulate the memory access, either access handler or special memory.
9526 */
9527 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
9528 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9529 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
9530 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
9531 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9532
9533 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9534 AssertRCReturn(rc, rc);
9535
9536 VBOXSTRICTRC rcStrict;
9537 if (!pExitRec)
9538 rcStrict = IEMExecOne(pVCpu);
9539 else
9540 {
9541 /* Frequent access or probing. */
9542 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9543 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9544 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9545 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9546 }
9547
9548 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9549
9550 Log4Func(("EPT return rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9551 return rcStrict;
9552#endif /* IN_NEM_DARWIN */
9553}
9554
9555#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9556
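/*
 * Note: The VMX-instruction exit handlers below all follow the same basic pattern:
 * read the exit qualification, instruction information and length, import just enough
 * guest state for IEM to re-execute the already decoded instruction (RSP, segment
 * registers, typically the virtual VMX state, plus the decode-related bits), decode
 * the memory operand if any, and hand things to the corresponding IEMExecDecodedXxx
 * worker.  A VINF_IEM_RAISED_XCPT result merely means an exception is now pending, so
 * it is folded into VINF_SUCCESS after flagging HM_CHANGED_RAISED_XCPT_MASK.
 */
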
9557/**
9558 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
9559 */
9560HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9561{
9562 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9563
9564 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9565 | HMVMX_READ_EXIT_INSTR_INFO
9566 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9567 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9568 | CPUMCTX_EXTRN_SREG_MASK
9569 | CPUMCTX_EXTRN_HWVIRT
9570 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9571 AssertRCReturn(rc, rc);
9572
9573 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9574
9575 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9576 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9577
9578 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
9579 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9580 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9581 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9582 {
9583 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9584 rcStrict = VINF_SUCCESS;
9585 }
9586 return rcStrict;
9587}
9588
9589
9590/**
9591 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9592 */
9593HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9594{
9595 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9596
9597    /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH;
9598 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9599 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9600 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9601 AssertRCReturn(rc, rc);
9602
9603 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9604
9605 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9606 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9607 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9608 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9609 {
9610 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9611 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9612 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9613 }
9614 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9615 return rcStrict;
9616}
9617
9618
9619/**
9620 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9621 */
9622HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9623{
9624 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9625
9626 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9627 | HMVMX_READ_EXIT_INSTR_INFO
9628 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9629 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9630 | CPUMCTX_EXTRN_SREG_MASK
9631 | CPUMCTX_EXTRN_HWVIRT
9632 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9633 AssertRCReturn(rc, rc);
9634
9635 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9636
9637 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9638 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9639
9640 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9641 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9642 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9643 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9644 {
9645 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9646 rcStrict = VINF_SUCCESS;
9647 }
9648 return rcStrict;
9649}
9650
9651
9652/**
9653 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9654 */
9655HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9656{
9657 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9658
9659 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9660 | HMVMX_READ_EXIT_INSTR_INFO
9661 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9662 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9663 | CPUMCTX_EXTRN_SREG_MASK
9664 | CPUMCTX_EXTRN_HWVIRT
9665 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9666 AssertRCReturn(rc, rc);
9667
9668 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9669
9670 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9671 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9672
9673 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9674 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9675 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9676 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9677 {
9678 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9679 rcStrict = VINF_SUCCESS;
9680 }
9681 return rcStrict;
9682}
9683
9684
9685/**
9686 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9687 */
9688HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9689{
9690 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9691
9692 /*
9693 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9694     * thus might not need to import the shadow VMCS state, but it's safer to do so
9695     * just in case code elsewhere dares to look at unsynced VMCS fields.
9696 */
9697 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9698 | HMVMX_READ_EXIT_INSTR_INFO
9699 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9700 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9701 | CPUMCTX_EXTRN_SREG_MASK
9702 | CPUMCTX_EXTRN_HWVIRT
9703 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9704 AssertRCReturn(rc, rc);
9705
9706 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9707
9708 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9709 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9710 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9711
9712 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9713 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9714 {
9715 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9716
9717# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
9718 /* Try for exit optimization. This is on the following instruction
9719 because it would be a waste of time to have to reinterpret the
9720 already decoded vmwrite instruction. */
9721 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
9722 if (pExitRec)
9723 {
9724 /* Frequent access or probing. */
9725 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9726 AssertRCReturn(rc, rc);
9727
9728 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9729 Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9730 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9731 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9732 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9733 }
9734# endif
9735 }
9736 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9737 {
9738 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9739 rcStrict = VINF_SUCCESS;
9740 }
9741 return rcStrict;
9742}
9743
9744
9745/**
9746 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9747 */
9748HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9749{
9750 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9751
9752    /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME;
9753 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9754 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9755 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9756 AssertRCReturn(rc, rc);
9757
9758 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9759
9760 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9761 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9762 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9763 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9764 {
9765 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9766 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9767 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9768 }
9769 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9770 return rcStrict;
9771}
9772
9773
9774/**
9775 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9776 */
9777HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9778{
9779 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9780
9781 /*
9782     * Although we should not get VMWRITE VM-exits for shadow VMCS fields, our HM hook
9783     * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and
9784     * flags re-loading of the entire shadow VMCS, so we should save the entire shadow VMCS here.
9785 */
9786 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9787 | HMVMX_READ_EXIT_INSTR_INFO
9788 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9789 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9790 | CPUMCTX_EXTRN_SREG_MASK
9791 | CPUMCTX_EXTRN_HWVIRT
9792 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9793 AssertRCReturn(rc, rc);
9794
9795 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9796
9797 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9798 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9799 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9800
9801 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9802 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9803 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9804 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9805 {
9806 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9807 rcStrict = VINF_SUCCESS;
9808 }
9809 return rcStrict;
9810}
9811
9812
9813/**
9814 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9815 */
9816HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9817{
9818 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9819
9820 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9821 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
9822 | CPUMCTX_EXTRN_HWVIRT
9823 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9824 AssertRCReturn(rc, rc);
9825
9826 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9827
9828 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9829 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9830 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9831 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9832 {
9833 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9834 rcStrict = VINF_SUCCESS;
9835 }
9836 return rcStrict;
9837}
9838
9839
9840/**
9841 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9842 */
9843HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9844{
9845 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9846
9847 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9848 | HMVMX_READ_EXIT_INSTR_INFO
9849 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9850 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9851 | CPUMCTX_EXTRN_SREG_MASK
9852 | CPUMCTX_EXTRN_HWVIRT
9853 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9854 AssertRCReturn(rc, rc);
9855
9856 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9857
9858 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9859 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9860
9861 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9862 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9863 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9864 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9865 {
9866 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9867 rcStrict = VINF_SUCCESS;
9868 }
9869 return rcStrict;
9870}
9871
9872
9873/**
9874 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9875 */
9876HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9877{
9878 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9879
9880 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9881 | HMVMX_READ_EXIT_INSTR_INFO
9882 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9883 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9884 | CPUMCTX_EXTRN_SREG_MASK
9885 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9886 AssertRCReturn(rc, rc);
9887
9888 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9889
9890 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9891 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9892
9893 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9894 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9895 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9896 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9897 {
9898 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9899 rcStrict = VINF_SUCCESS;
9900 }
9901 return rcStrict;
9902}
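

/*
 * For reference: the memory operand decoded above (and for INVEPT below) addresses a
 * 128-bit descriptor per the Intel SDM.  For INVVPID the first quadword holds the VPID in
 * bits 15:0 (bits 63:16 must be zero) and the second quadword the linear address used for
 * individual-address invalidation; for INVEPT the first quadword holds the EPT pointer and
 * the second is reserved.  IEMExecDecodedInvvpid/IEMExecDecodedInvept are expected to fetch
 * and validate the descriptor as part of emulating the instruction.
 */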
9903
9904
9905# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9906/**
9907 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9908 */
9909HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9910{
9911 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9912
9913 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9914 | HMVMX_READ_EXIT_INSTR_INFO
9915 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9916 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9917 | CPUMCTX_EXTRN_SREG_MASK
9918 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9919 AssertRCReturn(rc, rc);
9920
9921 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9922
9923 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9924 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9925
9926 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9927 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9928 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9929 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9930 {
9931 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9932 rcStrict = VINF_SUCCESS;
9933 }
9934 return rcStrict;
9935}
9936# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9937#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9938/** @} */
9939
9940
9941#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9942/** @name Nested-guest VM-exit handlers.
9943 * @{
9944 */
9945/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9946/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9947/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9948
9949/**
9950 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9951 * Conditional VM-exit.
9952 */
9953HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9954{
9955 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9956
9957 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
9958
9959 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9960 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9961 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9962
9963 switch (uExitIntType)
9964 {
9965# ifndef IN_NEM_DARWIN
9966 /*
9967 * Physical NMIs:
9968 * We shouldn't direct host physical NMIs to the nested-guest. Dispatch it to the host.
9969 */
9970 case VMX_EXIT_INT_INFO_TYPE_NMI:
9971 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9972# endif
9973
9974 /*
9975 * Hardware exceptions,
9976 * Software exceptions,
9977 * Privileged software exceptions:
9978 * Figure out if the exception must be delivered to the guest or the nested-guest.
9979 */
9980 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9981 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9982 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9983 {
9984 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9985 | HMVMX_READ_EXIT_INSTR_LEN
9986 | HMVMX_READ_IDT_VECTORING_INFO
9987 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9988
9989 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9990 if (CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo), pVmxTransient->uExitIntErrorCode))
9991 {
9992 /* Exit qualification is required for debug and page-fault exceptions. */
9993 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9994
9995 /*
9996 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9997 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9998 * length. However, if delivery of a software interrupt, software exception or privileged
9999 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
10000 */
10001 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10002 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT(pVmxTransient->uExitIntInfo,
10003 pVmxTransient->uExitIntErrorCode,
10004 pVmxTransient->uIdtVectoringInfo,
10005 pVmxTransient->uIdtVectoringErrorCode);
10006#ifdef DEBUG_ramshankar
10007 vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10008 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n",
10009 pVmxTransient->uExitIntInfo, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
10010 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
10011 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n",
10012 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
10013#endif
10014 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
10015 }
10016
10017 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
10018 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10019 return vmxHCExitXcpt(pVCpu, pVmxTransient);
10020 }
10021
10022 /*
10023 * Software interrupts:
10024 * VM-exits cannot be caused by software interrupts.
10025 *
10026 * External interrupts:
10027 * This should only happen when "acknowledge external interrupts on VM-exit"
10028 * control is set. However, we never set this when executing a guest or
10029 * nested-guest. For nested-guests it is emulated while injecting interrupts into
10030 * the guest.
10031 */
10032 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10033 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
10034 default:
10035 {
10036 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
10037 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
10038 }
10039 }
10040}
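

/*
 * For reference, the intercept decision made by CPUMIsGuestVmxXcptInterceptSet above
 * follows the architectural rule -- a rough sketch only, using approximate nested-guest
 * VMCS field names (the authoritative code lives in CPUM): the exception bitmap is
 * consulted for the vector, and #PF additionally compares the error code against the
 * PFEC mask/match pair:
 *
 *     bool fIntercept = RT_BOOL(pVmcsNstGst->u32XcptBitmap & RT_BIT_32(uVector));
 *     if (uVector == X86_XCPT_PF)
 *     {
 *         bool const fMatch = (uErrCode & pVmcsNstGst->u32XcptPFMask) == pVmcsNstGst->u32XcptPFMatch;
 *         fIntercept = fIntercept ? fMatch : !fMatch;
 *     }
 */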
10041
10042
10043/**
10044 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
10045 * Unconditional VM-exit.
10046 */
10047HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10048{
10049 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10050 return IEMExecVmxVmexitTripleFault(pVCpu);
10051}
10052
10053
10054/**
10055 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
10056 */
10057HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10058{
10059 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10060
10061 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
10062 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10063 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
10064}
10065
10066
10067/**
10068 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
10069 */
10070HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10071{
10072 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10073
10074 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
10075 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10076    return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
10077}
10078
10079
10080/**
10081 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
10082 * Unconditional VM-exit.
10083 */
10084HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10085{
10086 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10087
10088 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10089 | HMVMX_READ_EXIT_INSTR_LEN
10090 | HMVMX_READ_IDT_VECTORING_INFO
10091 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10092
10093 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10094 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10095 pVmxTransient->uIdtVectoringErrorCode);
10096 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
10097}
10098
10099
10100/**
10101 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10102 */
10103HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10104{
10105 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10106
10107 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
10108 {
10109 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10110 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10111 }
10112 return vmxHCExitHlt(pVCpu, pVmxTransient);
10113}
10114
10115
10116/**
10117 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10118 */
10119HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10120{
10121 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10122
10123 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10124 {
10125 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10126 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10127 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10128 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10129 }
10130 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
10131}
10132
10133
10134/**
10135 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10136 */
10137HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10138{
10139 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10140
10141 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
10142 {
10143 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10144 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10145 }
10146 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
10147}
10148
10149
10150/**
10151 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
10152 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
10153 */
10154HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10155{
10156 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10157
10158 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
10159 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
10160
10161 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10162
10163 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
10164 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10165 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10166
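    /* Outside long mode only the lower 32 bits of the register are used as the VMCS-field
       encoding, hence the masking below (this is also why EFER must be available here). */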
10167 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
10168 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
10169 u64VmcsField &= UINT64_C(0xffffffff);
10170
10171 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
10172 {
10173 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10174 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10175 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10176 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10177 }
10178
10179 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
10180 return vmxHCExitVmread(pVCpu, pVmxTransient);
10181 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
10182}
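

/*
 * For reference, CPUMIsGuestVmxVmreadVmwriteInterceptSet implements the architectural
 * VMREAD/VMWRITE-bitmap rule, roughly along these lines (sketch only; the bitmap pointers
 * are placeholders for the nested-guest VMCS-shadowing state kept by CPUM):
 *
 *     if (!CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VMCS_SHADOWING))
 *         return true;                                       // no shadowing: always intercept
 *     if (u64VmcsField & UINT64_C(0xffffffffffff8000))
 *         return true;                                       // bits 63:15 set: always intercept
 *     uint8_t const *pbBitmap = uExitReason == VMX_EXIT_VMREAD ? pbVmreadBitmap : pbVmwriteBitmap;
 *     return ASMBitTest(pbBitmap, (uint32_t)u64VmcsField & 0x7fff);
 */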
10183
10184
10185/**
10186 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10187 */
10188HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10189{
10190 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10191
10192 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10193 {
10194 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10195 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10196 }
10197
10198 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
10199}
10200
10201
10202/**
10203 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
10204 * Conditional VM-exit.
10205 */
10206HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10207{
10208 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10209
10210 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10211 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10212
10213 VBOXSTRICTRC rcStrict;
10214 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
10215 switch (uAccessType)
10216 {
10217 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
10218 {
10219 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10220 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10221 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10222 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10223
10224 bool fIntercept;
10225 switch (iCrReg)
10226 {
10227 case 0:
10228 case 4:
10229 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
10230 break;
10231
10232 case 3:
10233 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
10234 break;
10235
10236 case 8:
10237 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
10238 break;
10239
10240 default:
10241 fIntercept = false;
10242 break;
10243 }
10244 if (fIntercept)
10245 {
10246 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10247 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10248 }
10249 else
10250 {
10251 int const rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10252 AssertRCReturn(rc, rc);
10253 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10254 }
10255 break;
10256 }
10257
10258 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
10259 {
10260 /*
10261 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
10262 * CR2 reads do not cause a VM-exit.
10263 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
10264 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
10265 */
10266 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10267 if ( iCrReg == 3
10268 || iCrReg == 8)
10269 {
10270 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
10271 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
10272 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
10273 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
10274 {
10275 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10276 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10277 }
10278 else
10279 {
10280 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10281 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10282 }
10283 }
10284 else
10285 {
10286 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
10287 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
10288 }
10289 break;
10290 }
10291
10292 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
10293 {
10294 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
10295 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
10296 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
10297 if ( (uGstHostMask & X86_CR0_TS)
10298 && (uReadShadow & X86_CR0_TS))
10299 {
10300 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10301 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10302 }
10303 else
10304 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
10305 break;
10306 }
10307
10308 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10309 {
10310 RTGCPTR GCPtrEffDst;
10311 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
10312 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
10313 if (fMemOperand)
10314 {
10315 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10316 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
10317 }
10318 else
10319 GCPtrEffDst = NIL_RTGCPTR;
10320
10321 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
10322 {
10323 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10324 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
10325 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10326 }
10327 else
10328 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
10329 break;
10330 }
10331
10332 default:
10333 {
10334 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
10335 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
10336 }
10337 }
10338
10339 if (rcStrict == VINF_IEM_RAISED_XCPT)
10340 {
10341 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10342 rcStrict = VINF_SUCCESS;
10343 }
10344 return rcStrict;
10345}
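

/*
 * For reference, the MOV-to-CR0/CR4 intercept check used above boils down to the
 * architectural guest/host-mask rule: the write is intercepted when it would change any
 * bit owned by the (nested) hypervisor relative to the read shadow.  A rough sketch for
 * CR0 (CR4 is analogous; the authoritative code is CPUMIsGuestVmxMovToCr0Cr4InterceptSet):
 *
 *     uint64_t const fGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
 *     uint64_t const fReadShadow  = pVmcsNstGst->u64Cr0ReadShadow.u;
 *     return RT_BOOL((uNewCrX ^ fReadShadow) & fGstHostMask);
 */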
10346
10347
10348/**
10349 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
10350 * Conditional VM-exit.
10351 */
10352HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10353{
10354 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10355
10356 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
10357 {
10358 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10359 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10360 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10361 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10362 }
10363 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
10364}
10365
10366
10367/**
10368 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
10369 * Conditional VM-exit.
10370 */
10371HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10372{
10373 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10374
10375 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10376
10377 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
10378 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
10379 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
10380
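    /* The exit qualification encodes the access size minus one in bits 2:0, so the valid
       encodings 0, 1 and 3 map to 1, 2 and 4 byte accesses (2 is not a valid encoding). */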
10381 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
10382 uint8_t const cbAccess = s_aIOSizes[uIOSize];
10383 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
10384 {
10385 /*
10386 * IN/OUT instruction:
10387 * - Provides VM-exit instruction length.
10388 *
10389 * INS/OUTS instruction:
10390 * - Provides VM-exit instruction length.
10391 * - Provides Guest-linear address.
10392 * - Optionally provides VM-exit instruction info (depends on CPU feature).
10393 */
10394 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10395 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10396
10397        /* Make sure we don't use stale/uninitialized VMX-transient info below. */
10398 pVmxTransient->ExitInstrInfo.u = 0;
10399 pVmxTransient->uGuestLinearAddr = 0;
10400
10401 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
10402 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
10403 if (fIOString)
10404 {
10405 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10406 if (fVmxInsOutsInfo)
10407 {
10408 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
10409 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10410 }
10411 }
10412
10413 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
10414 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10415 }
10416 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
10417}
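

/*
 * For reference, CPUMIsGuestVmxIoInterceptSet follows the architectural I/O-bitmap rule --
 * a rough sketch only (pbIoBitmaps is a placeholder for the nested-guest's two contiguous
 * 4K bitmaps, A covering ports 0x0000-0x7fff and B covering ports 0x8000-0xffff; the real
 * helper lives in CPUM):
 *
 *     if (!CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_IO_BITMAPS))
 *         return CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_UNCOND_IO_EXIT);
 *     for (uint32_t off = 0; off < cbAccess; off++)          // every byte of the access is checked
 *         if (ASMBitTest(pbIoBitmaps, (uIOPort + off) & 0xffff))
 *             return true;
 *     return false;
 */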
10418
10419
10420/**
10421 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10422 */
10423HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10424{
10425 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10426
10427 uint32_t fMsrpm;
10428 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10429 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10430 else
10431 fMsrpm = VMXMSRPM_EXIT_RD;
10432
10433 if (fMsrpm & VMXMSRPM_EXIT_RD)
10434 {
10435 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10436 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10437 }
10438 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
10439}
10440
10441
10442/**
10443 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10444 */
10445HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10446{
10447 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10448
10449 uint32_t fMsrpm;
10450 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10451 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10452 else
10453 fMsrpm = VMXMSRPM_EXIT_WR;
10454
10455 if (fMsrpm & VMXMSRPM_EXIT_WR)
10456 {
10457 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10458 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10459 }
10460 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
10461}
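

/*
 * For both the RDMSR and WRMSR cases above, CPUMGetVmxMsrPermission consults the
 * nested-guest's 4K MSR bitmap, which the architecture splits into four 1K regions
 * (layout recap; the helper itself lives in CPUM):
 *
 *     offset 0x000: read  bitmap for low  MSRs 0x00000000..0x00001fff (one bit per MSR)
 *     offset 0x400: read  bitmap for high MSRs 0xc0000000..0xc0001fff
 *     offset 0x800: write bitmap for low  MSRs
 *     offset 0xc00: write bitmap for high MSRs
 *
 * MSRs outside both ranges unconditionally cause VM-exits while the bitmap is in use.
 */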
10462
10463
10464/**
10465 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10466 */
10467HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10468{
10469 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10470
10471 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
10472 {
10473 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10474 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10475 }
10476 return vmxHCExitMwait(pVCpu, pVmxTransient);
10477}
10478
10479
10480/**
10481 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
10482 * VM-exit.
10483 */
10484HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10485{
10486 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10487
10488 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
10489 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10490 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10491 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10492}
10493
10494
10495/**
10496 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10497 */
10498HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10499{
10500 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10501
10502 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
10503 {
10504 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10505 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10506 }
10507 return vmxHCExitMonitor(pVCpu, pVmxTransient);
10508}
10509
10510
10511/**
10512 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10513 */
10514HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10515{
10516 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10517
10518 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10519 * PAUSE when executing a nested-guest? If it does not, we would not need
10520 * to check for the intercepts here. Just call VM-exit... */
10521
10522 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10523 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10524 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10525 {
10526 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10527 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10528 }
10529 return vmxHCExitPause(pVCpu, pVmxTransient);
10530}
10531
10532
10533/**
10534 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10535 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10536 */
10537HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10538{
10539 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10540
10541 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10542 {
10543 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10544 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10545 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10546 }
10547 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10548}
10549
10550
10551/**
10552 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10553 * VM-exit.
10554 */
10555HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10556{
10557 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10558
10559 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10560 | HMVMX_READ_EXIT_INSTR_LEN
10561 | HMVMX_READ_IDT_VECTORING_INFO
10562 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10563
10564 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10565
10566 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10567 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10568
10569 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10570 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10571 pVmxTransient->uIdtVectoringErrorCode);
10572 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10573}
10574
10575
10576/**
10577 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10578 * Conditional VM-exit.
10579 */
10580HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10581{
10582 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10583
10584 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10585 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10586 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10587}
10588
10589
10590/**
10591 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10592 * Conditional VM-exit.
10593 */
10594HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10595{
10596 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10597
10598 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10599 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10600 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10601}
10602
10603
10604/**
10605 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10606 */
10607HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10608{
10609 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10610
10611 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10612 {
10613 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10614 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10615 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10616 }
10617 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10618}
10619
10620
10621/**
10622 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10623 */
10624HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10625{
10626 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10627
10628 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10629 {
10630 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10631 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10632 }
10633 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10634}
10635
10636
10637/**
10638 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10639 */
10640HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10641{
10642 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10643
10644 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10645 {
10646 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10647 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10648 | HMVMX_READ_EXIT_INSTR_INFO
10649 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10650 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10651 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10652 }
10653 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10654}
10655
10656
10657/**
10658 * Nested-guest VM-exit handler for invalid-guest state
10659 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10660 */
10661HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10662{
10663 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10664
10665 /*
10666 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10667     * So if it does happen, it most likely indicates a bug in the hardware-assisted VMX code.
10668     * Handle it as an invalid guest state of the outer guest.
10669 *
10670 * When the fast path is implemented, this should be changed to cause the corresponding
10671 * nested-guest VM-exit.
10672 */
10673 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10674}
10675
10676
10677/**
10678 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10679 * and only provide the instruction length.
10680 *
10681 * Unconditional VM-exit.
10682 */
10683HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10684{
10685 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10686
10687#ifdef VBOX_STRICT
10688 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10689 switch (pVmxTransient->uExitReason)
10690 {
10691 case VMX_EXIT_ENCLS:
10692 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10693 break;
10694
10695 case VMX_EXIT_VMFUNC:
10696 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10697 break;
10698 }
10699#endif
10700
10701 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10702 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10703}
10704
10705
10706/**
10707 * Nested-guest VM-exit handler for instructions that provide instruction length as
10708 * well as more information.
10709 *
10710 * Unconditional VM-exit.
10711 */
10712HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10713{
10714 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10715
10716# ifdef VBOX_STRICT
10717 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10718 switch (pVmxTransient->uExitReason)
10719 {
10720 case VMX_EXIT_GDTR_IDTR_ACCESS:
10721 case VMX_EXIT_LDTR_TR_ACCESS:
10722 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10723 break;
10724
10725 case VMX_EXIT_RDRAND:
10726 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10727 break;
10728
10729 case VMX_EXIT_RDSEED:
10730 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10731 break;
10732
10733 case VMX_EXIT_XSAVES:
10734 case VMX_EXIT_XRSTORS:
10735 /** @todo NSTVMX: Verify XSS-bitmap. */
10736 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10737 break;
10738
10739 case VMX_EXIT_UMWAIT:
10740 case VMX_EXIT_TPAUSE:
10741 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10742 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10743 break;
10744
10745 case VMX_EXIT_LOADIWKEY:
10746 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10747 break;
10748 }
10749# endif
10750
10751 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10752 | HMVMX_READ_EXIT_INSTR_LEN
10753 | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10754 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10755 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10756}
10757
10758# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10759
10760/**
10761 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10762 * Conditional VM-exit.
10763 */
10764HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10765{
10766 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10767 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10768
10769 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10770 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10771 {
10772 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10773 | HMVMX_READ_EXIT_INSTR_LEN
10774 | HMVMX_READ_EXIT_INTERRUPTION_INFO
10775 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10776 | HMVMX_READ_IDT_VECTORING_INFO
10777 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
10778 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10779 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10780 AssertRCReturn(rc, rc);
10781
10782 /*
10783         * If it's our VM-exit, we're responsible for re-injecting any event whose delivery
10784         * might have triggered this VM-exit. If we forward it to the inner VMM instead, it
10785         * becomes the inner VMM's problem and we clear any recovered pending event below.
10786 */
10787 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10788 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10789 { /*likely*/ }
10790 else
10791 {
10792 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
10793 return rcStrict;
10794 }
10795 uint32_t const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* paranoia. should not inject events below. */
10796
10797 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10798 uint64_t const uExitQual = pVmxTransient->uExitQual;
10799
10800 RTGCPTR GCPtrNestedFault;
10801 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10802 if (fIsLinearAddrValid)
10803 {
10804 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10805 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
10806 }
10807 else
10808 GCPtrNestedFault = 0;
10809
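        /* Synthesize a #PF-style error code for PGM from the exit qualification: instruction
           fetches map to ID, writes to RW, and a translation that was present with any of the
           read/write/execute permission bits reported maps to P. */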
10810 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10811 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10812 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10813 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10814 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10815
10816 PGMPTWALK Walk;
10817 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10818 rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, pCtx, GCPhysNestedFault,
10819 fIsLinearAddrValid, GCPtrNestedFault, &Walk);
10820 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
10821 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
10822 if (RT_SUCCESS(rcStrict))
10823 return rcStrict;
10824
10825 if (fClearEventOnForward)
10826 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
10827
10828 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10829 pVmxTransient->uIdtVectoringErrorCode);
10830 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10831 {
10832 VMXVEXITINFO const ExitInfo
10833 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
10834 pVmxTransient->uExitQual,
10835 pVmxTransient->cbExitInstr,
10836 pVmxTransient->uGuestLinearAddr,
10837 pVmxTransient->uGuestPhysicalAddr);
10838 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10839 }
10840
10841 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10842 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10843 }
10844
10845 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10846}
10847
10848
10849/**
10850 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10851 * Conditional VM-exit.
10852 */
10853HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10854{
10855 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10856 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10857
10858 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10859 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10860 {
10861 vmxHCReadToTransient<HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10862 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10863 AssertRCReturn(rc, rc);
10864
10865 PGMPTWALK Walk;
10866 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10867 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10868 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, pCtx,
10869 GCPhysNestedFault, false /* fIsLinearAddrValid */,
10870 0 /* GCPtrNestedFault */, &Walk);
10871 if (RT_SUCCESS(rcStrict))
10872 {
10873 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
10874 return rcStrict;
10875 }
10876
10877 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
10878 vmxHCReadToTransient< HMVMX_READ_IDT_VECTORING_INFO
10879 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10880
10881 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10882 pVmxTransient->uIdtVectoringErrorCode);
10883 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10884 }
10885
10886 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
10887}
10888
10889# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10890
10891/** @} */
10892#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10893
10894
10895/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10896 * probes.
10897 *
10898 * The following few functions and associated structure contain the bloat
10899 * necessary for providing detailed debug events and dtrace probes as well as
10900 * reliable host side single stepping. This works on the principle of
10901 * "subclassing" the normal execution loop and workers. We replace the loop
10902 * method completely and override selected helpers to add necessary adjustments
10903 * to their core operation.
10904 *
10905 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10906 * any performance for debug and analysis features.
10907 *
10908 * @{
10909 */
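
/*
 * A rough sketch of how these pieces are intended to fit together in the debug run loop
 * (the loop itself lives in the ring-0/NEM specific callers and its exact shape may differ):
 *
 *     VMXRUNDBGSTATE DbgState;
 *     vmxHCRunDebugStateInit(pVCpu, pVmxTransient, &DbgState);
 *     vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState);
 *     for (;;)
 *     {
 *         vmxHCPreRunGuestDebugStateApply(pVCpu, pVmxTransient, &DbgState); // just before VM-entry
 *         ... run the guest and handle the VM-exit, consulting DbgState.bmExitsToCheck;
 *             break on single-step completion, pending DBGF events or errors ...
 *     }
 *     rcStrict = vmxHCRunDebugStateRevert(pVCpu, pVmxTransient, &DbgState, rcStrict);
 */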
10910
10911/**
10912 * Transient per-VCPU debug state of the VMCS and related info that we save/restore in
10913 * the debug run loop.
10914 */
10915typedef struct VMXRUNDBGSTATE
10916{
10917 /** The RIP we started executing at. This is for detecting that we stepped. */
10918 uint64_t uRipStart;
10919 /** The CS we started executing with. */
10920 uint16_t uCsStart;
10921
10922 /** Whether we've actually modified the 1st execution control field. */
10923 bool fModifiedProcCtls : 1;
10924 /** Whether we've actually modified the 2nd execution control field. */
10925 bool fModifiedProcCtls2 : 1;
10926 /** Whether we've actually modified the exception bitmap. */
10927 bool fModifiedXcptBitmap : 1;
10928
10929    /** We desire the CR0 mask to be cleared. */
10930 bool fClearCr0Mask : 1;
10931    /** We desire the CR4 mask to be cleared. */
10932 bool fClearCr4Mask : 1;
10933 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10934 uint32_t fCpe1Extra;
10935 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10936 uint32_t fCpe1Unwanted;
10937 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10938 uint32_t fCpe2Extra;
10939 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10940 uint32_t bmXcptExtra;
10941 /** The sequence number of the Dtrace provider settings the state was
10942 * configured against. */
10943 uint32_t uDtraceSettingsSeqNo;
10944 /** VM-exits to check (one bit per VM-exit). */
10945 uint32_t bmExitsToCheck[3];
10946
10947 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10948 uint32_t fProcCtlsInitial;
10949 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10950 uint32_t fProcCtls2Initial;
10951 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10952 uint32_t bmXcptInitial;
10953} VMXRUNDBGSTATE;
10954AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
10955typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
10956
10957
10958/**
10959 * Initializes the VMXRUNDBGSTATE structure.
10960 *
10961 * @param pVCpu The cross context virtual CPU structure of the
10962 * calling EMT.
10963 * @param pVmxTransient The VMX-transient structure.
10964 * @param pDbgState The debug state to initialize.
10965 */
10966static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10967{
10968 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
10969 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
10970
10971 pDbgState->fModifiedProcCtls = false;
10972 pDbgState->fModifiedProcCtls2 = false;
10973 pDbgState->fModifiedXcptBitmap = false;
10974 pDbgState->fClearCr0Mask = false;
10975 pDbgState->fClearCr4Mask = false;
10976 pDbgState->fCpe1Extra = 0;
10977 pDbgState->fCpe1Unwanted = 0;
10978 pDbgState->fCpe2Extra = 0;
10979 pDbgState->bmXcptExtra = 0;
10980 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
10981 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
10982 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
10983}
10984
10985
10986/**
10987 * Updates the VMCS fields with changes requested by @a pDbgState.
10988 *
10989 * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
10990 * immediately before executing guest code, i.e. when interrupts are disabled.
10991 * We don't check status codes here as we cannot easily assert or return in the
10992 * latter case.
10993 *
10994 * @param pVCpu The cross context virtual CPU structure.
10995 * @param pVmxTransient The VMX-transient structure.
10996 * @param pDbgState The debug state.
10997 */
10998static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10999{
11000 /*
11001 * Ensure desired flags in VMCS control fields are set.
11002 * (Ignoring write failure here, as we're committed and it's just debug extras.)
11003 *
11004 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
11005 * there should be no stale data in pCtx at this point.
11006 */
11007 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11008 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
11009 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
11010 {
11011 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
11012 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
11013 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
11014 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
11015 pDbgState->fModifiedProcCtls = true;
11016 }
11017
11018 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
11019 {
11020 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
11021 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
11022 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
11023 pDbgState->fModifiedProcCtls2 = true;
11024 }
11025
11026 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
11027 {
11028 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
11029 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
11030 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
11031 pDbgState->fModifiedXcptBitmap = true;
11032 }
11033
11034 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
11035 {
11036 pVmcsInfo->u64Cr0Mask = 0;
11037 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
11038 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
11039 }
11040
11041 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
11042 {
11043 pVmcsInfo->u64Cr4Mask = 0;
11044 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
11045 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
11046 }
11047
11048 NOREF(pVCpu);
11049}
11050
11051
11052/**
11053 * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
11054 * re-entry next time around.
11055 *
11056 * @returns Strict VBox status code (i.e. informational status codes too).
11057 * @param pVCpu The cross context virtual CPU structure.
11058 * @param pVmxTransient The VMX-transient structure.
11059 * @param pDbgState The debug state.
11060 * @param rcStrict The return code from executing the guest using single
11061 * stepping.
11062 */
11063static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
11064 VBOXSTRICTRC rcStrict)
11065{
11066 /*
11067     * Restore the VM-execution control settings (and exception bitmap) as we may not
11068     * re-enter this function the next time around.
11069 */
11070 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11071
11072    /* We reload the initial value and trigger what recalculations we can the
11073       next time around. From the looks of things, that's all that's required atm. */
11074 if (pDbgState->fModifiedProcCtls)
11075 {
11076 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
11077 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
11078 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
11079 AssertRC(rc2);
11080 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
11081 }
11082
11083 /* We're currently the only ones messing with this one, so just restore the
11084 cached value and reload the field. */
11085 if ( pDbgState->fModifiedProcCtls2
11086 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
11087 {
11088 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
11089 AssertRC(rc2);
11090 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
11091 }
11092
11093 /* If we've modified the exception bitmap, we restore it and trigger
11094 reloading and partial recalculation the next time around. */
11095 if (pDbgState->fModifiedXcptBitmap)
11096 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
11097
11098 return rcStrict;
11099}
11100
11101
11102/**
11103 * Configures VM-exit controls for current DBGF and DTrace settings.
11104 *
11105 * This updates @a pDbgState and the VMCS execution control fields to reflect
11106 * the necessary VM-exits demanded by DBGF and DTrace.
11107 *
11108 * @param pVCpu The cross context virtual CPU structure.
11109 * @param pVmxTransient The VMX-transient structure. May update
11110 * fUpdatedTscOffsettingAndPreemptTimer.
11111 * @param pDbgState The debug state.
11112 */
11113static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11114{
11115#ifndef IN_NEM_DARWIN
11116 /*
11117 * Take down the dtrace serial number so we can spot changes.
11118 */
11119 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
11120 ASMCompilerBarrier();
11121#endif
11122
11123 /*
11124 * We'll rebuild most of the middle block of data members (holding the
11125 * current settings) as we go along here, so start by clearing it all.
11126 */
11127 pDbgState->bmXcptExtra = 0;
11128 pDbgState->fCpe1Extra = 0;
11129 pDbgState->fCpe1Unwanted = 0;
11130 pDbgState->fCpe2Extra = 0;
11131 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
11132 pDbgState->bmExitsToCheck[i] = 0;
11133
11134 /*
11135 * Software interrupts (INT XXh) - no idea how to trigger these...
11136 */
11137 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11138 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
11139 || VBOXVMM_INT_SOFTWARE_ENABLED())
11140 {
11141 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11142 }
11143
11144 /*
11145 * INT3 breakpoints - triggered by #BP exceptions.
11146 */
11147 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
11148 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11149
11150 /*
11151 * Exception bitmap and XCPT events+probes.
11152 */
11153 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
11154 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
11155 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
11156
11157 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
11158 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
11159 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11160 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
11161 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
11162 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
11163 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
11164 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
11165 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
11166 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
11167 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
11168 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
11169 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
11170 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
11171 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
11172 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
11173 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
11174 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
11175
11176 if (pDbgState->bmXcptExtra)
11177 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11178
11179 /*
11180 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
11181 *
11182 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
11183 * So, when adding/changing/removing please don't forget to update it.
11184 *
11185 * Some of the macros are picking up local variables to save horizontal space,
11186 * (being able to see it in a table is the lesser evil here).
11187 */
11188#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
11189 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
11190 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
11191#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
11192 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11193 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11194 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11195 } else do { } while (0)
11196#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
11197 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11198 { \
11199 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
11200 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11201 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11202 } else do { } while (0)
11203#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
11204 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11205 { \
11206 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
11207 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11208 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11209 } else do { } while (0)
11210#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
11211 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11212 { \
11213 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
11214 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11215 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11216 } else do { } while (0)
11217
11218 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
11219 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
11220 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
11221 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
11222 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
11223
11224 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
11225 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
11226 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
11227 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
11228 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
11229 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
11230 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
11231 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
11232 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
11233 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
11234 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
11235 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
11236 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
11237 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
11238 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
11239 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
11240 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
11241 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
11242 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
11243 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
11244 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
11245 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
11246 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
11247 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
11248 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
11249 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
11250 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
11251 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
11252 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
11253 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
11254 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
11255 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
11256 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
11257 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
11258 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
11259 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
11260
11261 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
11262 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11263 {
11264 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo,
11265 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
11266 AssertRC(rc);
11267
11268#if 0 /** @todo fix me */
11269 pDbgState->fClearCr0Mask = true;
11270 pDbgState->fClearCr4Mask = true;
11271#endif
11272 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
11273 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
11274 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11275 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
11276 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
11277 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
11278 require clearing here and in the loop if we start using it. */
11279 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
11280 }
11281 else
11282 {
11283 if (pDbgState->fClearCr0Mask)
11284 {
11285 pDbgState->fClearCr0Mask = false;
11286 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
11287 }
11288 if (pDbgState->fClearCr4Mask)
11289 {
11290 pDbgState->fClearCr4Mask = false;
11291 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
11292 }
11293 }
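    /* The else branch above undoes any earlier CR0/CR4 mask clearing (currently
       disabled, see the @todo) by marking CR0/CR4 as changed, so the next
       guest-state export re-establishes the normal guest/host masks. */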
11294 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
11295 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
11296
11297 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
11298 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
11299 {
11300 /** @todo later, need to fix handler as it assumes this won't usually happen. */
11301 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
11302 }
11303 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
11304 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
11305
11306 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
11307 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
11308 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
11309 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
11310 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
11311 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
11312 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
11313 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
11314#if 0 /** @todo too slow, fix handler. */
11315 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
11316#endif
11317 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
11318
11319 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
11320 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
11321 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
11322 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
11323 {
11324 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11325 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
11326 }
11327 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11328 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11329 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11330 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11331
11332 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
11333 || IS_EITHER_ENABLED(pVM, INSTR_STR)
11334 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
11335 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
11336 {
11337 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11338 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
11339 }
11340 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
11341 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
11342 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
11343 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
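    /* Both descriptor-table groups above hinge on the same secondary control,
       VMX_PROC_CTLS2_DESC_TABLE_EXIT; the individual instructions are told apart
       later using the VM-exit instruction information (see the GDTR_IDTR / LDTR_TR
       cases in vmxHCHandleExitDtraceEvents below). */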
11344
11345 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
11346 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
11347 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
11348 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
11349 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
11350 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
11351 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
11352 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
11353 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
11354 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
11355 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
11356 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
11357 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
11358 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
11359 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
11360 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
11361 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
11362 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
11363 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
11364 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
11365 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
11366 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
11367
11368#undef IS_EITHER_ENABLED
11369#undef SET_ONLY_XBM_IF_EITHER_EN
11370#undef SET_CPE1_XBM_IF_EITHER_EN
11371#undef SET_CPEU_XBM_IF_EITHER_EN
11372#undef SET_CPE2_XBM_IF_EITHER_EN
11373
11374 /*
11375 * Sanitize the control stuff.
11376 */
11377 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
11378 if (pDbgState->fCpe2Extra)
11379 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
11380 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
11381 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
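    /* Per the VMX capability MSR convention: 'allowed1' has a bit set for every
       control that may be 1, while 'allowed0' has a bit set for every control that
       must remain 1. So extra controls outside allowed1 are dropped above, and
       unwanted controls the CPU insists on (allowed0) are removed from fCpe1Unwanted. */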
11382#ifndef IN_NEM_DARWIN
11383 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11384 {
11385 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
11386 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11387 }
11388#else
11389 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11390 {
11391 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
11392 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11393 }
11394#endif
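    /* Flipping the wanted RDTSC-exit state above also clears
       fUpdatedTscOffsettingAndPreemptTimer so the run loop recomputes the TSC
       offsetting / preemption timer setup before the next VM-entry. */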
11395
11396 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
11397 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
11398 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
11399 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
11400}
11401
11402
11403/**
11404 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
11405 * appropriate.
11406 *
11407 * The caller has already checked the VM-exit against the
11408 * VMXRUNDBGSTATE::bmExitsToCheck bitmap and has also checked for NMIs,
11409 * so neither needs to be repeated here.
11410 *
11411 * @returns Strict VBox status code (i.e. informational status codes too).
11412 * @param pVCpu The cross context virtual CPU structure.
11413 * @param pVmxTransient The VMX-transient structure.
11414 * @param uExitReason The VM-exit reason.
11415 *
11416 * @remarks The name of this function is displayed by dtrace, so keep it short
11417 * and to the point. No longer than 33 chars long, please.
11418 */
11419static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11420{
11421 /*
11422 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11423 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11424 *
11425 * Note! This is the reverse operation of what vmxHCPreRunGuestDebugStateUpdate
11426 * does. Additions, changes and removals must be made in both places, in the same order.
11427 *
11428 * Added/removed events must also be reflected in the next section
11429 * where we dispatch dtrace events.
11430 */
11431 bool fDtrace1 = false;
11432 bool fDtrace2 = false;
11433 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11434 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11435 uint32_t uEventArg = 0;
11436#define SET_EXIT(a_EventSubName) \
11437 do { \
11438 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11439 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11440 } while (0)
11441#define SET_BOTH(a_EventSubName) \
11442 do { \
11443 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11444 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11445 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11446 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11447 } while (0)
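    /* For instance, for VMX_EXIT_CPUID below, SET_BOTH(CPUID) expands (modulo the
       do/while wrapper) to:
           enmEvent1 = DBGFEVENT_INSTR_CPUID;
           enmEvent2 = DBGFEVENT_EXIT_CPUID;
           fDtrace1  = VBOXVMM_INSTR_CPUID_ENABLED();
           fDtrace2  = VBOXVMM_EXIT_CPUID_ENABLED();
       so both the instruction and the exit flavour of the event are considered. */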
11448 switch (uExitReason)
11449 {
11450 case VMX_EXIT_MTF:
11451 return vmxHCExitMtf(pVCpu, pVmxTransient);
11452
11453 case VMX_EXIT_XCPT_OR_NMI:
11454 {
11455 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11456 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11457 {
11458 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11459 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11460 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11461 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11462 {
11463 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11464 {
11465 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE>(pVCpu, pVmxTransient);
11466 uEventArg = pVmxTransient->uExitIntErrorCode;
11467 }
11468 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11469 switch (enmEvent1)
11470 {
11471 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11472 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11473 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11474 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11475 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11476 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11477 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11478 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11479 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11480 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11481 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11482 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11483 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11484 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11485 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11486 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11487 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11488 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11489 default: break;
11490 }
11491 }
11492 else
11493 AssertFailed();
11494 break;
11495
11496 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11497 uEventArg = idxVector;
11498 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11499 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11500 break;
11501 }
11502 break;
11503 }
11504
11505 case VMX_EXIT_TRIPLE_FAULT:
11506 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11507 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11508 break;
11509 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11510 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11511 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11512 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11513 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11514
11515 /* Instruction specific VM-exits: */
11516 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11517 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11518 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11519 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11520 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11521 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11522 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11523 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11524 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11525 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11526 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11527 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11528 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11529 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11530 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11531 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11532 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11533 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11534 case VMX_EXIT_MOV_CRX:
11535 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11536 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11537 SET_BOTH(CRX_READ);
11538 else
11539 SET_BOTH(CRX_WRITE);
11540 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11541 break;
11542 case VMX_EXIT_MOV_DRX:
11543 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11544 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11545 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11546 SET_BOTH(DRX_READ);
11547 else
11548 SET_BOTH(DRX_WRITE);
11549 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11550 break;
11551 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11552 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11553 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11554 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11555 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11556 case VMX_EXIT_GDTR_IDTR_ACCESS:
11557 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11558 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11559 {
11560 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11561 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11562 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11563 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11564 }
11565 break;
11566
11567 case VMX_EXIT_LDTR_TR_ACCESS:
11568 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11569 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11570 {
11571 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11572 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11573 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11574 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11575 }
11576 break;
11577
11578 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11579 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11580 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11581 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11582 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11583 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11584 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11585 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11586 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11587 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11588 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11589
11590 /* Events that aren't relevant at this point. */
11591 case VMX_EXIT_EXT_INT:
11592 case VMX_EXIT_INT_WINDOW:
11593 case VMX_EXIT_NMI_WINDOW:
11594 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11595 case VMX_EXIT_PREEMPT_TIMER:
11596 case VMX_EXIT_IO_INSTR:
11597 break;
11598
11599 /* Errors and unexpected events. */
11600 case VMX_EXIT_INIT_SIGNAL:
11601 case VMX_EXIT_SIPI:
11602 case VMX_EXIT_IO_SMI:
11603 case VMX_EXIT_SMI:
11604 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11605 case VMX_EXIT_ERR_MSR_LOAD:
11606 case VMX_EXIT_ERR_MACHINE_CHECK:
11607 case VMX_EXIT_PML_FULL:
11608 case VMX_EXIT_VIRTUALIZED_EOI:
11609 break;
11610
11611 default:
11612 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11613 break;
11614 }
11615#undef SET_BOTH
11616#undef SET_EXIT
11617
11618 /*
11619 * Dtrace tracepoints go first. We do them all in one place so we don't
11620 * have to duplicate the guest-state saving and related setup a few dozen times.
11621 * The downside is that we've got to repeat the switch, though this time
11622 * we use enmEvent since the probes are a subset of what DBGF does.
11623 */
11624 if (fDtrace1 || fDtrace2)
11625 {
11626 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11627 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11628 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11629 switch (enmEvent1)
11630 {
11631 /** @todo consider which extra parameters would be helpful for each probe. */
11632 case DBGFEVENT_END: break;
11633 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11634 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11635 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11636 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11637 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11638 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11639 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11640 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11641 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11642 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11643 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11644 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11645 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11646 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11647 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11648 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11649 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11650 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11651 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11652 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11653 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11654 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11655 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11656 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11657 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11658 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11659 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11660 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11661 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11662 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11663 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11664 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11665 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11666 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11667 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11668 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11669 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11670 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11671 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11672 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11673 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11674 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11675 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11676 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11677 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11678 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11679 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11680 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11681 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11682 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11683 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11684 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11685 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11686 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11687 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11688 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11689 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11690 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11691 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11692 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11693 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11694 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11695 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11696 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11697 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11698 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11699 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11700 }
11701 switch (enmEvent2)
11702 {
11703 /** @todo consider which extra parameters would be helpful for each probe. */
11704 case DBGFEVENT_END: break;
11705 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11706 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11707 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11708 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11709 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11710 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11711 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11712 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11713 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11714 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11715 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11716 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11717 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11718 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11719 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11720 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11721 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11722 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11723 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11724 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11725 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11726 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11727 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11728 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11729 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11730 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11731 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11732 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11733 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11734 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11735 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11736 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11737 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11738 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11739 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11740 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11741 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11742 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11743 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11744 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11745 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11746 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11747 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11748 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11749 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11750 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11751 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11752 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11753 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11754 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11755 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11756 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11757 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11758 }
11759 }
11760
11761 /*
11762 * Fire off the DBGF event, if enabled (our check here is just a quick one,
11763 * the DBGF call will do a full check).
11764 *
11765 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11766 * Note! If we have two events, we prioritize the first, i.e. the instruction
11767 * one, in order to avoid event nesting.
11768 */
11769 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11770 if ( enmEvent1 != DBGFEVENT_END
11771 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11772 {
11773 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11774 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11775 if (rcStrict != VINF_SUCCESS)
11776 return rcStrict;
11777 }
11778 else if ( enmEvent2 != DBGFEVENT_END
11779 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11780 {
11781 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11782 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11783 if (rcStrict != VINF_SUCCESS)
11784 return rcStrict;
11785 }
11786
11787 return VINF_SUCCESS;
11788}
11789
11790
11791/**
11792 * Single-stepping VM-exit filtering.
11793 *
11794 * This preprocesses the VM-exit and decides whether we've gotten far
11795 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11796 * handling is performed.
11797 *
11798 * @returns Strict VBox status code (i.e. informational status codes too).
11799 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11800 * @param pVmxTransient The VMX-transient structure.
11801 * @param pDbgState The debug state.
11802 */
11803DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11804{
11805 /*
11806 * Expensive (saves context) generic dtrace VM-exit probe.
11807 */
11808 uint32_t const uExitReason = pVmxTransient->uExitReason;
11809 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11810 { /* more likely */ }
11811 else
11812 {
11813 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11814 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11815 AssertRC(rc);
11816 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11817 }
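    /* The probe hands its consumer the complete guest context plus the exit
       qualification, which is why the full HMVMX_CPUMCTX_EXTRN_ALL import is done
       here; the cost is only paid when the probe is actually armed. */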
11818
11819#ifndef IN_NEM_DARWIN
11820 /*
11821 * Check for host NMI, just to get that out of the way.
11822 */
11823 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11824 { /* normally likely */ }
11825 else
11826 {
11827 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
11828 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11829 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11830 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11831 }
11832#endif
11833
11834 /*
11835 * Check for single stepping event if we're stepping.
11836 */
11837 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11838 {
11839 switch (uExitReason)
11840 {
11841 case VMX_EXIT_MTF:
11842 return vmxHCExitMtf(pVCpu, pVmxTransient);
11843
11844 /* Various events: */
11845 case VMX_EXIT_XCPT_OR_NMI:
11846 case VMX_EXIT_EXT_INT:
11847 case VMX_EXIT_TRIPLE_FAULT:
11848 case VMX_EXIT_INT_WINDOW:
11849 case VMX_EXIT_NMI_WINDOW:
11850 case VMX_EXIT_TASK_SWITCH:
11851 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11852 case VMX_EXIT_APIC_ACCESS:
11853 case VMX_EXIT_EPT_VIOLATION:
11854 case VMX_EXIT_EPT_MISCONFIG:
11855 case VMX_EXIT_PREEMPT_TIMER:
11856
11857 /* Instruction specific VM-exits: */
11858 case VMX_EXIT_CPUID:
11859 case VMX_EXIT_GETSEC:
11860 case VMX_EXIT_HLT:
11861 case VMX_EXIT_INVD:
11862 case VMX_EXIT_INVLPG:
11863 case VMX_EXIT_RDPMC:
11864 case VMX_EXIT_RDTSC:
11865 case VMX_EXIT_RSM:
11866 case VMX_EXIT_VMCALL:
11867 case VMX_EXIT_VMCLEAR:
11868 case VMX_EXIT_VMLAUNCH:
11869 case VMX_EXIT_VMPTRLD:
11870 case VMX_EXIT_VMPTRST:
11871 case VMX_EXIT_VMREAD:
11872 case VMX_EXIT_VMRESUME:
11873 case VMX_EXIT_VMWRITE:
11874 case VMX_EXIT_VMXOFF:
11875 case VMX_EXIT_VMXON:
11876 case VMX_EXIT_MOV_CRX:
11877 case VMX_EXIT_MOV_DRX:
11878 case VMX_EXIT_IO_INSTR:
11879 case VMX_EXIT_RDMSR:
11880 case VMX_EXIT_WRMSR:
11881 case VMX_EXIT_MWAIT:
11882 case VMX_EXIT_MONITOR:
11883 case VMX_EXIT_PAUSE:
11884 case VMX_EXIT_GDTR_IDTR_ACCESS:
11885 case VMX_EXIT_LDTR_TR_ACCESS:
11886 case VMX_EXIT_INVEPT:
11887 case VMX_EXIT_RDTSCP:
11888 case VMX_EXIT_INVVPID:
11889 case VMX_EXIT_WBINVD:
11890 case VMX_EXIT_XSETBV:
11891 case VMX_EXIT_RDRAND:
11892 case VMX_EXIT_INVPCID:
11893 case VMX_EXIT_VMFUNC:
11894 case VMX_EXIT_RDSEED:
11895 case VMX_EXIT_XSAVES:
11896 case VMX_EXIT_XRSTORS:
11897 {
11898 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11899 AssertRCReturn(rc, rc);
11900 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11901 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11902 return VINF_EM_DBG_STEPPED;
11903 break;
11904 }
11905
11906 /* Errors and unexpected events: */
11907 case VMX_EXIT_INIT_SIGNAL:
11908 case VMX_EXIT_SIPI:
11909 case VMX_EXIT_IO_SMI:
11910 case VMX_EXIT_SMI:
11911 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11912 case VMX_EXIT_ERR_MSR_LOAD:
11913 case VMX_EXIT_ERR_MACHINE_CHECK:
11914 case VMX_EXIT_PML_FULL:
11915 case VMX_EXIT_VIRTUALIZED_EOI:
11916 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault-like, so I guess we must process it? */
11917 break;
11918
11919 default:
11920 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11921 break;
11922 }
11923 }
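    /* For the instruction-style exits above, a step counts as complete once RIP or
       CS no longer matches the values recorded when stepping started
       (pDbgState->uRipStart / uCsStart); otherwise we fall through to normal
       VM-exit handling below. */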
11924
11925 /*
11926 * Check for debugger event breakpoints and dtrace probes.
11927 */
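    /* bmExitsToCheck is the per-exit-reason bitmap filled in by the debug state
       update above; only exits for which a DBGF event or dtrace probe was armed
       take the (comparatively expensive) vmxHCHandleExitDtraceEvents detour. */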
11928 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11929 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11930 {
11931 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11932 if (rcStrict != VINF_SUCCESS)
11933 return rcStrict;
11934 }
11935
11936 /*
11937 * Normal processing.
11938 */
11939#ifdef HMVMX_USE_FUNCTION_TABLE
11940 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11941#else
11942 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11943#endif
11944}
11945
11946/** @} */