source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@102024

Last change on this file since 102024 was 102024, checked in by vboxsync, 15 months ago

VMM: VMXAllTemplate.cpp.h: Added new VMCS field encodings.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 526.5 KB
1/* $Id: VMXAllTemplate.cpp.h 102024 2023-11-09 11:53:55Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
39#endif
40
41/** Enables condensing of VMREAD instructions, see vmxHCReadToTransient(). */
42#define HMVMX_WITH_CONDENSED_VMREADS
43
44/** Use the function table. */
45#define HMVMX_USE_FUNCTION_TABLE
46
47/** Determine which tagged-TLB flush handler to use. */
48#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
49#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
50#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
51#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
52
53/** Assert that all the given fields have been read from the VMCS. */
54#ifdef VBOX_STRICT
55# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
56 do { \
57 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead); \
58 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
59 } while (0)
60#else
61# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
62#endif
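/* Usage sketch (illustrative): a VM-exit handler that relies on the exit
 * qualification having been fetched can assert it with
 *     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
 * which compiles to an empty statement in non-strict builds. */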
63
64/**
65 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
66 * guest using hardware-assisted VMX.
67 *
68 * This excludes state like GPRs (other than RSP) which are always swapped
69 * and restored across the world-switch, and also registers like the EFER
70 * MSR which cannot be modified by the guest without causing a VM-exit.
71 */
72#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
73 | CPUMCTX_EXTRN_RFLAGS \
74 | CPUMCTX_EXTRN_RSP \
75 | CPUMCTX_EXTRN_SREG_MASK \
76 | CPUMCTX_EXTRN_TABLE_MASK \
77 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
78 | CPUMCTX_EXTRN_SYSCALL_MSRS \
79 | CPUMCTX_EXTRN_SYSENTER_MSRS \
80 | CPUMCTX_EXTRN_TSC_AUX \
81 | CPUMCTX_EXTRN_OTHER_MSRS \
82 | CPUMCTX_EXTRN_CR0 \
83 | CPUMCTX_EXTRN_CR3 \
84 | CPUMCTX_EXTRN_CR4 \
85 | CPUMCTX_EXTRN_DR7 \
86 | CPUMCTX_EXTRN_HWVIRT \
87 | CPUMCTX_EXTRN_INHIBIT_INT \
88 | CPUMCTX_EXTRN_INHIBIT_NMI)
89
90/**
91 * Exception bitmap mask for real-mode guests (real-on-v86).
92 *
93 * We need to intercept all exceptions manually except:
94 * - \#AC and \#DB are always intercepted to prevent the CPU from deadlocking
95 * due to bugs in Intel CPUs.
96 * - \#PF need not be intercepted even in real-mode if we have nested paging
97 * support.
98 */
99#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
100 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
101 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
102 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
103 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
104 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
105 | RT_BIT(X86_XCPT_XF))
106
107/** Maximum VM-instruction error number. */
108#define HMVMX_INSTR_ERROR_MAX 28
109
110/** Profiling macro. */
111#ifdef HM_PROFILE_EXIT_DISPATCH
112# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
113# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
114#else
115# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
116# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
117#endif
118
119#ifndef IN_NEM_DARWIN
120/** Assert that preemption is disabled or covered by thread-context hooks. */
121# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
122 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
123
124/** Assert that we haven't migrated CPUs when thread-context hooks are not
125 * used. */
126# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
127 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
128 ("Illegal migration! Entered on CPU %u Current %u\n", \
129 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
130#else
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
132# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
133#endif
134
135/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
136 * context. */
137#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
138 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
139 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
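/* Usage sketch (illustrative): code that is about to read pVCpu->cpum.GstCtx.cr0
 * can assert that CR0 has already been imported from the VMCS with
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
 * i.e. the CPUMCTX_EXTRN_CR0 bit must no longer be set in fExtrn. */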
140
141/** Log the VM-exit reason with an easily visible marker to identify it in a
142 * potential sea of logging data. */
143#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
144 do { \
145 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
146 HMGetVmxExitName(a_uExitReason))); \
147 } while (0)
148
149
150/*********************************************************************************************************************************
151* Structures and Typedefs *
152*********************************************************************************************************************************/
153/**
154 * Memory operand read or write access.
155 */
156typedef enum VMXMEMACCESS
157{
158 VMXMEMACCESS_READ = 0,
159 VMXMEMACCESS_WRITE = 1
160} VMXMEMACCESS;
161
162
163/**
164 * VMX VM-exit handler.
165 *
166 * @returns Strict VBox status code (i.e. informational status codes too).
167 * @param pVCpu The cross context virtual CPU structure.
168 * @param pVmxTransient The VMX-transient structure.
169 */
170#ifndef HMVMX_USE_FUNCTION_TABLE
171typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
172#else
173typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
174/** Pointer to VM-exit handler. */
175typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
176#endif
177
178/**
179 * VMX VM-exit handler, non-strict status code.
180 *
181 * This is generally the same as FNVMXEXITHANDLER; the NSRC bit is just FYI.
182 *
183 * @returns VBox status code, no informational status code returned.
184 * @param pVCpu The cross context virtual CPU structure.
185 * @param pVmxTransient The VMX-transient structure.
186 *
187 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
188 * use of that status code will be replaced with VINF_EM_SOMETHING
189 * later when switching over to IEM.
190 */
191#ifndef HMVMX_USE_FUNCTION_TABLE
192typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
193#else
194typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
195#endif
196
197
198/*********************************************************************************************************************************
199* Internal Functions *
200*********************************************************************************************************************************/
201#ifndef HMVMX_USE_FUNCTION_TABLE
202DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
204# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
205#else
206# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
207# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
208#endif
209#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
210DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
211#endif
212
213static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
214
215/** @name VM-exit handler prototypes.
216 * @{
217 */
218static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
219static FNVMXEXITHANDLER vmxHCExitExtInt;
220static FNVMXEXITHANDLER vmxHCExitTripleFault;
221static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
222static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
223static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
224static FNVMXEXITHANDLER vmxHCExitCpuid;
225static FNVMXEXITHANDLER vmxHCExitGetsec;
226static FNVMXEXITHANDLER vmxHCExitHlt;
227static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
228static FNVMXEXITHANDLER vmxHCExitInvlpg;
229static FNVMXEXITHANDLER vmxHCExitRdpmc;
230static FNVMXEXITHANDLER vmxHCExitVmcall;
231#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
232static FNVMXEXITHANDLER vmxHCExitVmclear;
233static FNVMXEXITHANDLER vmxHCExitVmlaunch;
234static FNVMXEXITHANDLER vmxHCExitVmptrld;
235static FNVMXEXITHANDLER vmxHCExitVmptrst;
236static FNVMXEXITHANDLER vmxHCExitVmread;
237static FNVMXEXITHANDLER vmxHCExitVmresume;
238static FNVMXEXITHANDLER vmxHCExitVmwrite;
239static FNVMXEXITHANDLER vmxHCExitVmxoff;
240static FNVMXEXITHANDLER vmxHCExitVmxon;
241static FNVMXEXITHANDLER vmxHCExitInvvpid;
242# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
243static FNVMXEXITHANDLER vmxHCExitInvept;
244# endif
245#endif
246static FNVMXEXITHANDLER vmxHCExitRdtsc;
247static FNVMXEXITHANDLER vmxHCExitMovCRx;
248static FNVMXEXITHANDLER vmxHCExitMovDRx;
249static FNVMXEXITHANDLER vmxHCExitIoInstr;
250static FNVMXEXITHANDLER vmxHCExitRdmsr;
251static FNVMXEXITHANDLER vmxHCExitWrmsr;
252static FNVMXEXITHANDLER vmxHCExitMwait;
253static FNVMXEXITHANDLER vmxHCExitMtf;
254static FNVMXEXITHANDLER vmxHCExitMonitor;
255static FNVMXEXITHANDLER vmxHCExitPause;
256static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
257static FNVMXEXITHANDLER vmxHCExitApicAccess;
258static FNVMXEXITHANDLER vmxHCExitEptViolation;
259static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
260static FNVMXEXITHANDLER vmxHCExitRdtscp;
261static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
262static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
263static FNVMXEXITHANDLER vmxHCExitXsetbv;
264static FNVMXEXITHANDLER vmxHCExitInvpcid;
265#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
266static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
267#endif
268static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
269static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
270/** @} */
271
272#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
273/** @name Nested-guest VM-exit handler prototypes.
274 * @{
275 */
276static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
277static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
278static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
279static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
280static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
281static FNVMXEXITHANDLER vmxHCExitHltNested;
282static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
283static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
284static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
285static FNVMXEXITHANDLER vmxHCExitRdtscNested;
286static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
287static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
288static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
289static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
290static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
291static FNVMXEXITHANDLER vmxHCExitMwaitNested;
292static FNVMXEXITHANDLER vmxHCExitMtfNested;
293static FNVMXEXITHANDLER vmxHCExitMonitorNested;
294static FNVMXEXITHANDLER vmxHCExitPauseNested;
295static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
296static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
297static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
298static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
299static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
301static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
302static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
303static FNVMXEXITHANDLER vmxHCExitInstrNested;
304static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
305# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
306static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
307static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
308# endif
309/** @} */
310#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
311
312
313/*********************************************************************************************************************************
314* Global Variables *
315*********************************************************************************************************************************/
316#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
317/**
318 * Array of all VMCS fields.
319 * Any fields added to the VT-x spec. should be added here.
320 *
321 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
322 * of nested-guests.
323 */
324static const uint32_t g_aVmcsFields[] =
325{
326 /* 16-bit control fields. */
327 VMX_VMCS16_VPID,
328 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
329 VMX_VMCS16_EPTP_INDEX,
330 VMX_VMCS16_HLAT_PREFIX_SIZE,
331 VMX_VMCS16_LAST_PID_PTR_INDEX,
332
333 /* 16-bit guest-state fields. */
334 VMX_VMCS16_GUEST_ES_SEL,
335 VMX_VMCS16_GUEST_CS_SEL,
336 VMX_VMCS16_GUEST_SS_SEL,
337 VMX_VMCS16_GUEST_DS_SEL,
338 VMX_VMCS16_GUEST_FS_SEL,
339 VMX_VMCS16_GUEST_GS_SEL,
340 VMX_VMCS16_GUEST_LDTR_SEL,
341 VMX_VMCS16_GUEST_TR_SEL,
342 VMX_VMCS16_GUEST_INTR_STATUS,
343 VMX_VMCS16_GUEST_PML_INDEX,
344 VMX_VMCS16_GUEST_UINV,
345
346 /* 16-bit host-state fields. */
347 VMX_VMCS16_HOST_ES_SEL,
348 VMX_VMCS16_HOST_CS_SEL,
349 VMX_VMCS16_HOST_SS_SEL,
350 VMX_VMCS16_HOST_DS_SEL,
351 VMX_VMCS16_HOST_FS_SEL,
352 VMX_VMCS16_HOST_GS_SEL,
353 VMX_VMCS16_HOST_TR_SEL,
354
355 /* 64-bit control fields. */
356 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
357 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
358 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
359 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
360 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
361 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
362 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
363 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
364 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
365 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
366 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
367 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
368 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
369 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
370 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
371 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
372 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
373 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
374 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
375 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
376 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
377 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
378 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
379 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
380 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
381 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
382 VMX_VMCS64_CTRL_EPTP_FULL,
383 VMX_VMCS64_CTRL_EPTP_HIGH,
384 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
385 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
386 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
387 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
388 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
389 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
390 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
391 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
392 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
393 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
394 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
395 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
396 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
397 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
398 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
399 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
400 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
401 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
402 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
403 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
404 VMX_VMCS64_CTRL_SPPTP_FULL,
405 VMX_VMCS64_CTRL_SPPTP_HIGH,
406 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
407 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
408 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
409 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
410 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
411 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
412 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_FULL,
413 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_HIGH,
414 VMX_VMCS64_CTRL_HLAT_PTR_FULL,
415 VMX_VMCS64_CTRL_HLAT_PTR_HIGH,
416 VMX_VMCS64_CTRL_EXIT2_FULL,
417 VMX_VMCS64_CTRL_EXIT2_HIGH,
418 VMX_VMCS64_CTRL_SPEC_CTRL_MASK_FULL,
419 VMX_VMCS64_CTRL_SPEC_CTRL_MASK_HIGH,
420 VMX_VMCS64_CTRL_SPEC_CTRL_SHADOW_FULL,
421 VMX_VMCS64_CTRL_SPEC_CTRL_SHADOW_HIGH,
422
423 /* 64-bit read-only data fields. */
424 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
425 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
426
427 /* 64-bit guest-state fields. */
428 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
429 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
430 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
431 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
432 VMX_VMCS64_GUEST_PAT_FULL,
433 VMX_VMCS64_GUEST_PAT_HIGH,
434 VMX_VMCS64_GUEST_EFER_FULL,
435 VMX_VMCS64_GUEST_EFER_HIGH,
436 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
437 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
438 VMX_VMCS64_GUEST_PDPTE0_FULL,
439 VMX_VMCS64_GUEST_PDPTE0_HIGH,
440 VMX_VMCS64_GUEST_PDPTE1_FULL,
441 VMX_VMCS64_GUEST_PDPTE1_HIGH,
442 VMX_VMCS64_GUEST_PDPTE2_FULL,
443 VMX_VMCS64_GUEST_PDPTE2_HIGH,
444 VMX_VMCS64_GUEST_PDPTE3_FULL,
445 VMX_VMCS64_GUEST_PDPTE3_HIGH,
446 VMX_VMCS64_GUEST_BNDCFGS_FULL,
447 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
448 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
449 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
450 VMX_VMCS64_GUEST_PKRS_FULL,
451 VMX_VMCS64_GUEST_PKRS_HIGH,
452
453 /* 64-bit host-state fields. */
454 VMX_VMCS64_HOST_PAT_FULL,
455 VMX_VMCS64_HOST_PAT_HIGH,
456 VMX_VMCS64_HOST_EFER_FULL,
457 VMX_VMCS64_HOST_EFER_HIGH,
458 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
459 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
460 VMX_VMCS64_HOST_PKRS_FULL,
461 VMX_VMCS64_HOST_PKRS_HIGH,
462
463 /* 32-bit control fields. */
464 VMX_VMCS32_CTRL_PIN_EXEC,
465 VMX_VMCS32_CTRL_PROC_EXEC,
466 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
467 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
468 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
469 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
470 VMX_VMCS32_CTRL_EXIT,
471 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
472 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
473 VMX_VMCS32_CTRL_ENTRY,
474 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
475 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
476 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
477 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
478 VMX_VMCS32_CTRL_TPR_THRESHOLD,
479 VMX_VMCS32_CTRL_PROC_EXEC2,
480 VMX_VMCS32_CTRL_PLE_GAP,
481 VMX_VMCS32_CTRL_PLE_WINDOW,
482 VMX_VMCS32_CTRL_INSTR_TIMEOUT,
483
484 /* 32-bit read-only fields. */
485 VMX_VMCS32_RO_VM_INSTR_ERROR,
486 VMX_VMCS32_RO_EXIT_REASON,
487 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
488 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
489 VMX_VMCS32_RO_IDT_VECTORING_INFO,
490 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
491 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
492 VMX_VMCS32_RO_EXIT_INSTR_INFO,
493
494 /* 32-bit guest-state fields. */
495 VMX_VMCS32_GUEST_ES_LIMIT,
496 VMX_VMCS32_GUEST_CS_LIMIT,
497 VMX_VMCS32_GUEST_SS_LIMIT,
498 VMX_VMCS32_GUEST_DS_LIMIT,
499 VMX_VMCS32_GUEST_FS_LIMIT,
500 VMX_VMCS32_GUEST_GS_LIMIT,
501 VMX_VMCS32_GUEST_LDTR_LIMIT,
502 VMX_VMCS32_GUEST_TR_LIMIT,
503 VMX_VMCS32_GUEST_GDTR_LIMIT,
504 VMX_VMCS32_GUEST_IDTR_LIMIT,
505 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
506 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
507 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
508 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
509 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
510 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
511 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
512 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
513 VMX_VMCS32_GUEST_INT_STATE,
514 VMX_VMCS32_GUEST_ACTIVITY_STATE,
515 VMX_VMCS32_GUEST_SMBASE,
516 VMX_VMCS32_GUEST_SYSENTER_CS,
517 VMX_VMCS32_PREEMPT_TIMER_VALUE,
518
519 /* 32-bit host-state fields. */
520 VMX_VMCS32_HOST_SYSENTER_CS,
521
522 /* Natural-width control fields. */
523 VMX_VMCS_CTRL_CR0_MASK,
524 VMX_VMCS_CTRL_CR4_MASK,
525 VMX_VMCS_CTRL_CR0_READ_SHADOW,
526 VMX_VMCS_CTRL_CR4_READ_SHADOW,
527 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
528 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
529 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
530 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
531
532 /* Natural-width read-only data fields. */
533 VMX_VMCS_RO_EXIT_QUALIFICATION,
534 VMX_VMCS_RO_IO_RCX,
535 VMX_VMCS_RO_IO_RSI,
536 VMX_VMCS_RO_IO_RDI,
537 VMX_VMCS_RO_IO_RIP,
538 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
539
540 /* Natural-width guest-state fields. */
541 VMX_VMCS_GUEST_CR0,
542 VMX_VMCS_GUEST_CR3,
543 VMX_VMCS_GUEST_CR4,
544 VMX_VMCS_GUEST_ES_BASE,
545 VMX_VMCS_GUEST_CS_BASE,
546 VMX_VMCS_GUEST_SS_BASE,
547 VMX_VMCS_GUEST_DS_BASE,
548 VMX_VMCS_GUEST_FS_BASE,
549 VMX_VMCS_GUEST_GS_BASE,
550 VMX_VMCS_GUEST_LDTR_BASE,
551 VMX_VMCS_GUEST_TR_BASE,
552 VMX_VMCS_GUEST_GDTR_BASE,
553 VMX_VMCS_GUEST_IDTR_BASE,
554 VMX_VMCS_GUEST_DR7,
555 VMX_VMCS_GUEST_RSP,
556 VMX_VMCS_GUEST_RIP,
557 VMX_VMCS_GUEST_RFLAGS,
558 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
559 VMX_VMCS_GUEST_SYSENTER_ESP,
560 VMX_VMCS_GUEST_SYSENTER_EIP,
561 VMX_VMCS_GUEST_S_CET,
562 VMX_VMCS_GUEST_SSP,
563 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
564
565 /* Natural-width host-state fields. */
566 VMX_VMCS_HOST_CR0,
567 VMX_VMCS_HOST_CR3,
568 VMX_VMCS_HOST_CR4,
569 VMX_VMCS_HOST_FS_BASE,
570 VMX_VMCS_HOST_GS_BASE,
571 VMX_VMCS_HOST_TR_BASE,
572 VMX_VMCS_HOST_GDTR_BASE,
573 VMX_VMCS_HOST_IDTR_BASE,
574 VMX_VMCS_HOST_SYSENTER_ESP,
575 VMX_VMCS_HOST_SYSENTER_EIP,
576 VMX_VMCS_HOST_RSP,
577 VMX_VMCS_HOST_RIP,
578 VMX_VMCS_HOST_S_CET,
579 VMX_VMCS_HOST_SSP,
580 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
581};
582#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
583
584#ifdef HMVMX_USE_FUNCTION_TABLE
585/**
586 * VMX_EXIT dispatch table.
587 */
588static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
589{
590 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
591 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
592 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
593 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
594 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
595 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
596 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
597 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
598 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
599 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
600 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
601 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
602 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
603 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
604 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
605 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
606 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
607 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
608 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
609#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
610 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
611 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
612 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
613 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
614 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
615 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
616 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
617 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
618 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
619#else
620 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
621 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
622 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
623 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
624 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
625 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
626 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
627 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
628 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
629#endif
630 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
631 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
632 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
633 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
634 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
635 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
636 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
637 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
638 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
639 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
640 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
641 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
642 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
643 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
644 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
645 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
646 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
647 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
648 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
649 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
650 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
651 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
652#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
653 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
654#else
655 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
656#endif
657 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
658 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
659#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
660 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
661#else
662 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
663#endif
664 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
665 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
666 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
667 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
668 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
669 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
670 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
671 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
672 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
673 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
674 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
675 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
676 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
677 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
678 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
679 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
680};
681#endif /* HMVMX_USE_FUNCTION_TABLE */
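/* Dispatch sketch (illustrative): when HMVMX_USE_FUNCTION_TABLE is defined, the
 * common VM-exit handler is expected to index the table above by the exit
 * reason, along the lines of:
 *     Assert(pVmxTransient->uExitReason <= VMX_EXIT_MAX);
 *     VBOXSTRICTRC rcStrict = g_aVMExitHandlers[pVmxTransient->uExitReason].pfn(pVCpu, pVmxTransient);
 */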
682
683#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
684static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
685{
686 /* 0 */ "(Not Used)",
687 /* 1 */ "VMCALL executed in VMX root operation.",
688 /* 2 */ "VMCLEAR with invalid physical address.",
689 /* 3 */ "VMCLEAR with VMXON pointer.",
690 /* 4 */ "VMLAUNCH with non-clear VMCS.",
691 /* 5 */ "VMRESUME with non-launched VMCS.",
692 /* 6 */ "VMRESUME after VMXOFF.",
693 /* 7 */ "VM-entry with invalid control fields.",
694 /* 8 */ "VM-entry with invalid host state fields.",
695 /* 9 */ "VMPTRLD with invalid physical address.",
696 /* 10 */ "VMPTRLD with VMXON pointer.",
697 /* 11 */ "VMPTRLD with incorrect revision identifier.",
698 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
699 /* 13 */ "VMWRITE to read-only VMCS component.",
700 /* 14 */ "(Not Used)",
701 /* 15 */ "VMXON executed in VMX root operation.",
702 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
703 /* 17 */ "VM-entry with non-launched executive VMCS.",
704 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
705 /* 19 */ "VMCALL with non-clear VMCS.",
706 /* 20 */ "VMCALL with invalid VM-exit control fields.",
707 /* 21 */ "(Not Used)",
708 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
709 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
710 /* 24 */ "VMCALL with invalid SMM-monitor features.",
711 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
712 /* 26 */ "VM-entry with events blocked by MOV SS.",
713 /* 27 */ "(Not Used)",
714 /* 28 */ "Invalid operand to INVEPT/INVVPID."
715};
716#endif /* VBOX_STRICT && LOG_ENABLED */
717
718
719/**
720 * Gets the CR0 guest/host mask.
721 *
722 * These bits typically do not change through the lifetime of a VM. Any bit set in
723 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
724 * by the guest.
725 *
726 * @returns The CR0 guest/host mask.
727 * @param pVCpu The cross context virtual CPU structure.
728 */
729static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
730{
731 /*
732 * Modifications by the guest to CR0 bits that VT-x ignores saving/restoring
733 * (CD, ET, NW) and to CR0 bits that we require for shadow paging (PG) must cause VM-exits.
734 *
735 * Furthermore, modifications to any bits that are reserved/unspecified currently
736 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
737 * when future CPUs specify and use currently reserved/unspecified bits.
738 */
739 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
740 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
741 * and @bugref{6944}. */
742 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
743 AssertCompile(RT_HI_U32(VMX_EXIT_HOST_CR0_IGNORE_MASK) == UINT32_C(0xffffffff)); /* Paranoia. */
744 return ( X86_CR0_PE
745 | X86_CR0_NE
746 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
747 | X86_CR0_PG
748 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
749}
750
751
752/**
753 * Gets the CR4 guest/host mask.
754 *
755 * These bits typically do not change through the lifetime of a VM. Any bit set in
756 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
757 * by the guest.
758 *
759 * @returns The CR4 guest/host mask.
760 * @param pVCpu The cross context virtual CPU structure.
761 */
762static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
763{
764 /*
765 * We construct a mask of all CR4 bits that the guest can modify without causing
766 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
767 * a VM-exit when the guest attempts to modify them when executing using
768 * hardware-assisted VMX.
769 *
770 * When a feature is not exposed to the guest (and may be present on the host),
771 * we want to intercept guest modifications to the bit so we can emulate proper
772 * behavior (e.g., #GP).
773 *
774 * Furthermore, only modifications to those bits that don't require immediate
775 * emulation are allowed. For example, PCIDE is excluded because the behavior
776 * depends on CR3 which might not always be the guest value while executing
777 * using hardware-assisted VMX.
778 */
779 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
780 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
781#ifdef IN_NEM_DARWIN
782 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
783#endif
784 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
785
786 /*
787 * Paranoia.
788 * Ensure features exposed to the guest are present on the host.
789 */
790 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
791#ifdef IN_NEM_DARWIN
792 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
793#endif
794 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
795
796 uint64_t const fGstMask = X86_CR4_PVI
797 | X86_CR4_TSD
798 | X86_CR4_DE
799 | X86_CR4_MCE
800 | X86_CR4_PCE
801 | X86_CR4_OSXMMEEXCPT
802 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
803#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
804 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
805 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
806#endif
807 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
808 return ~fGstMask;
809}
810
811
812/**
813 * Adds one or more exceptions to the exception bitmap and commits it to the current
814 * VMCS.
815 *
816 * @param pVCpu The cross context virtual CPU structure.
817 * @param pVmxTransient The VMX-transient structure.
818 * @param uXcptMask The exception(s) to add.
819 */
820static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
821{
822 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
823 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
824 if ((uXcptBitmap & uXcptMask) != uXcptMask)
825 {
826 uXcptBitmap |= uXcptMask;
827 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
828 AssertRC(rc);
829 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
830 }
831}
832
833
834/**
835 * Adds an exception to the exception bitmap and commits it to the current VMCS.
836 *
837 * @param pVCpu The cross context virtual CPU structure.
838 * @param pVmxTransient The VMX-transient structure.
839 * @param uXcpt The exception to add.
840 */
841static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
842{
843 Assert(uXcpt <= X86_XCPT_LAST);
844 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
845}
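/* Usage sketch (illustrative): intercepting #DB for the guest would look like
 *     vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_DB);
 * which ORs RT_BIT_32(X86_XCPT_DB) into the cached exception bitmap and only
 * rewrites VMX_VMCS32_CTRL_EXCEPTION_BITMAP when the bit was not set before. */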
846
847
848/**
849 * Removes one or more exceptions from the exception bitmap and commits it to the
850 * current VMCS.
851 *
852 * This takes care of not removing the exception intercept if a nested-guest
853 * requires the exception to be intercepted.
854 *
855 * @returns VBox status code.
856 * @param pVCpu The cross context virtual CPU structure.
857 * @param pVmxTransient The VMX-transient structure.
858 * @param uXcptMask The exception(s) to remove.
859 */
860static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
861{
862 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
863 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
864 if (uXcptBitmap & uXcptMask)
865 {
866#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
867 if (!pVmxTransient->fIsNestedGuest)
868 { /* likely */ }
869 else
870 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
871#endif
872#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
873 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
874 | RT_BIT(X86_XCPT_DE)
875 | RT_BIT(X86_XCPT_NM)
876 | RT_BIT(X86_XCPT_TS)
877 | RT_BIT(X86_XCPT_UD)
878 | RT_BIT(X86_XCPT_NP)
879 | RT_BIT(X86_XCPT_SS)
880 | RT_BIT(X86_XCPT_GP)
881 | RT_BIT(X86_XCPT_PF)
882 | RT_BIT(X86_XCPT_MF));
883#elif defined(HMVMX_ALWAYS_TRAP_PF)
884 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
885#endif
886 if (uXcptMask)
887 {
888 /* Validate we are not removing any essential exception intercepts. */
889#ifndef IN_NEM_DARWIN
890 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
891#else
892 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
893#endif
894 NOREF(pVCpu);
895 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
896 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
897
898 /* Remove it from the exception bitmap. */
899 uXcptBitmap &= ~uXcptMask;
900
901 /* Commit and update the cache if necessary. */
902 if (pVmcsInfo->u32XcptBitmap != uXcptBitmap)
903 {
904 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
905 AssertRC(rc);
906 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
907 }
908 }
909 }
910 return VINF_SUCCESS;
911}
912
913
914/**
915 * Removes an exception from the exception bitmap and commits it to the current
916 * VMCS.
917 *
918 * @returns VBox status code.
919 * @param pVCpu The cross context virtual CPU structure.
920 * @param pVmxTransient The VMX-transient structure.
921 * @param uXcpt The exception to remove.
922 */
923static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
924{
925 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
926}
927
928#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
929
930/**
931 * Loads the shadow VMCS specified by the VMCS info. object.
932 *
933 * @returns VBox status code.
934 * @param pVmcsInfo The VMCS info. object.
935 *
936 * @remarks Can be called with interrupts disabled.
937 */
938static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
939{
940 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
941 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
942
943 return VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
944}
945
946
947/**
948 * Clears the shadow VMCS specified by the VMCS info. object.
949 *
950 * @returns VBox status code.
951 * @param pVmcsInfo The VMCS info. object.
952 *
953 * @remarks Can be called with interrupts disabled.
954 */
955static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
956{
957 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
958 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
959
960 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
961 if (RT_SUCCESS(rc))
962 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
963 return rc;
964}
965
966
967/**
968 * Switches from and to the specified VMCSes.
969 *
970 * @returns VBox status code.
971 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
972 * @param pVmcsInfoTo The VMCS info. object we are switching to.
973 *
974 * @remarks Called with interrupts disabled.
975 */
976static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
977{
978 /*
979 * Clear the VMCS we are switching out if it has not already been cleared.
980 * This will sync any CPU internal data back to the VMCS.
981 */
982 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
983 {
984 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
985 if (RT_SUCCESS(rc))
986 {
987 /*
988 * The shadow VMCS, if any, would not be active at this point since we
989 * would have cleared it while importing the virtual hardware-virtualization
990 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
991 * clear the shadow VMCS here, just assert for safety.
992 */
993 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
994 }
995 else
996 return rc;
997 }
998
999 /*
1000 * Clear the VMCS we are switching to if it has not already been cleared.
1001 * This will initialize the VMCS launch state to "clear" required for loading it.
1002 *
1003 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1004 */
1005 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1006 {
1007 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1008 if (RT_SUCCESS(rc))
1009 { /* likely */ }
1010 else
1011 return rc;
1012 }
1013
1014 /*
1015 * Finally, load the VMCS we are switching to.
1016 */
1017 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1018}
1019
1020
1021/**
1022 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1023 * caller.
1024 *
1025 * @returns VBox status code.
1026 * @param pVCpu The cross context virtual CPU structure.
1027 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1028 * true) or guest VMCS (pass false).
1029 */
1030static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1031{
1032 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1033 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1034
1035 PVMXVMCSINFO pVmcsInfoFrom;
1036 PVMXVMCSINFO pVmcsInfoTo;
1037 if (fSwitchToNstGstVmcs)
1038 {
1039 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1040 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1041 }
1042 else
1043 {
1044 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1045 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1046 }
1047
1048 /*
1049 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1050 * preemption hook code path acquires the current VMCS.
1051 */
1052 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1053
1054 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1055 if (RT_SUCCESS(rc))
1056 {
1057 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1058 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1059
1060 /*
1061 * If we are switching to a VMCS that was executed on a different host CPU or was
1062 * never executed before, flag that we need to export the host state before executing
1063 * guest/nested-guest code using hardware-assisted VMX.
1064 *
1065 * This could probably be done in a preemptible context since the preemption hook
1066 * will flag the necessary change in host context. However, since preemption is
1067 * already disabled and to avoid making assumptions about host specific code in
1068 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1069 * disabled.
1070 */
1071 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1072 { /* likely */ }
1073 else
1074 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1075
1076 ASMSetFlags(fEFlags);
1077
1078 /*
1079 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1080 * flag that we need to update the host MSR values there. Even if we decide in the
1081 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1082 * if its content differs, we would have to update the host MSRs anyway.
1083 */
1084 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1085 }
1086 else
1087 ASMSetFlags(fEFlags);
1088 return rc;
1089}
1090
1091#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1092#ifdef VBOX_STRICT
1093
1094/**
1095 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1096 * transient structure.
1097 *
1098 * @param pVCpu The cross context virtual CPU structure.
1099 * @param pVmxTransient The VMX-transient structure.
1100 */
1101DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1102{
1103 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1104 AssertRC(rc);
1105}
1106
1107
1108/**
1109 * Reads the VM-entry exception error code field from the VMCS into
1110 * the VMX transient structure.
1111 *
1112 * @param pVCpu The cross context virtual CPU structure.
1113 * @param pVmxTransient The VMX-transient structure.
1114 */
1115DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1116{
1117 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1118 AssertRC(rc);
1119}
1120
1121
1122/**
1123 * Reads the VM-entry instruction length field from the VMCS into
1124 * the VMX transient structure.
1125 *
1126 * @param pVCpu The cross context virtual CPU structure.
1127 * @param pVmxTransient The VMX-transient structure.
1128 */
1129DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1130{
1131 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1132 AssertRC(rc);
1133}
1134
1135#endif /* VBOX_STRICT */
1136
1137
1138/**
1139 * Reads VMCS fields into the VMXTRANSIENT structure, slow path version.
1140 *
1141 * Don't call directly unless it's likely that some or all of the fields
1142 * given in @a a_fReadMask have already been read.
1143 *
1144 * @tparam a_fReadMask The fields to read.
1145 * @param pVCpu The cross context virtual CPU structure.
1146 * @param pVmxTransient The VMX-transient structure.
1147 */
1148template<uint32_t const a_fReadMask>
1149static void vmxHCReadToTransientSlow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1150{
1151 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1152 | HMVMX_READ_EXIT_INSTR_LEN
1153 | HMVMX_READ_EXIT_INSTR_INFO
1154 | HMVMX_READ_IDT_VECTORING_INFO
1155 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1156 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1157 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1158 | HMVMX_READ_GUEST_LINEAR_ADDR
1159 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1160 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1161 )) == 0);
1162
1163 if ((pVmxTransient->fVmcsFieldsRead & a_fReadMask) != a_fReadMask)
1164 {
1165 uint32_t const fVmcsFieldsRead = pVmxTransient->fVmcsFieldsRead;
1166
1167 if ( (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1168 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1169 {
1170 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1171 AssertRC(rc);
1172 }
1173 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1174 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1175 {
1176 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1177 AssertRC(rc);
1178 }
1179 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1180 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1181 {
1182 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1183 AssertRC(rc);
1184 }
1185 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1186 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1187 {
1188 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1189 AssertRC(rc);
1190 }
1191 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1192 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1193 {
1194 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1195 AssertRC(rc);
1196 }
1197 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1198 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1199 {
1200 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1201 AssertRC(rc);
1202 }
1203 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1204 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1205 {
1206 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1207 AssertRC(rc);
1208 }
1209 if ( (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1210 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1211 {
1212 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1213 AssertRC(rc);
1214 }
1215 if ( (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1216 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1217 {
1218 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1219 AssertRC(rc);
1220 }
1221 if ( (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1222 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1223 {
1224 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1225 AssertRC(rc);
1226 }
1227
1228 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1229 }
1230}
1231
1232
1233/**
1234 * Reads VMCS fields into the VMXTRANSIENT structure.
1235 *
1236 * This optimizes for the case where none of @a a_fReadMask has been read yet,
1237 * generating an optimized read sequence without any conditionals in
1238 * non-strict builds.
1239 *
1240 * @tparam a_fReadMask The fields to read. One or more of the
1241 * HMVMX_READ_XXX fields ORed together.
1242 * @param pVCpu The cross context virtual CPU structure.
1243 * @param pVmxTransient The VMX-transient structure.
1244 */
1245template<uint32_t const a_fReadMask>
1246DECLINLINE(void) vmxHCReadToTransient(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1247{
1248 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1249 | HMVMX_READ_EXIT_INSTR_LEN
1250 | HMVMX_READ_EXIT_INSTR_INFO
1251 | HMVMX_READ_IDT_VECTORING_INFO
1252 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1253 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1254 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1255 | HMVMX_READ_GUEST_LINEAR_ADDR
1256 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1257 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1258 )) == 0);
1259
1260 if (RT_LIKELY(!(pVmxTransient->fVmcsFieldsRead & a_fReadMask)))
1261 {
1262 if (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1263 {
1264 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1265 AssertRC(rc);
1266 }
1267 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1268 {
1269 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1270 AssertRC(rc);
1271 }
1272 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1273 {
1274 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1275 AssertRC(rc);
1276 }
1277 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1278 {
1279 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1280 AssertRC(rc);
1281 }
1282 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1283 {
1284 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1285 AssertRC(rc);
1286 }
1287 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1288 {
1289 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1290 AssertRC(rc);
1291 }
1292 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1293 {
1294 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1295 AssertRC(rc);
1296 }
1297 if (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1298 {
1299 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1300 AssertRC(rc);
1301 }
1302 if (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1303 {
1304 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1305 AssertRC(rc);
1306 }
1307 if (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1308 {
1309 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1310 AssertRC(rc);
1311 }
1312
1313 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1314 }
1315 else
1316 {
1317 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatReadToTransientFallback);
1318 Log11Func(("a_fReadMask=%#x fVmcsFieldsRead=%#x => %#x - Taking inefficient code path!\n",
1319 a_fReadMask, pVmxTransient->fVmcsFieldsRead, a_fReadMask & pVmxTransient->fVmcsFieldsRead));
1320 vmxHCReadToTransientSlow<a_fReadMask>(pVCpu, pVmxTransient);
1321 }
1322}
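/* Usage sketch (illustrative): a VM-exit handler typically condenses its VMCS
 * reads into a single templated call, e.g.:
 *     vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
 *                          | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
 * after which pVmxTransient->uExitQual and pVmxTransient->cbExitInstr are valid. */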
1323
1324
1325#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1326/**
1327 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1328 *
1329 * @param pVCpu The cross context virtual CPU structure.
1330 * @param pVmxTransient The VMX-transient structure.
1331 */
1332static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1333{
1334 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1335 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1336 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1337 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1338 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1339 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1340 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1341 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1342 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1343 AssertRC(rc);
1344 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1345 | HMVMX_READ_EXIT_INSTR_LEN
1346 | HMVMX_READ_EXIT_INSTR_INFO
1347 | HMVMX_READ_IDT_VECTORING_INFO
1348 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1349 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1350 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1351 | HMVMX_READ_GUEST_LINEAR_ADDR
1352 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1353}
1354#endif
1355
1356/**
1357 * Verifies that our cached values of the VMCS fields are all consistent with
1358 * what's actually present in the VMCS.
1359 *
1360 * @returns VBox status code.
1361 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1362 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1363 * VMCS content. HMCPU error-field is
1364 * updated, see VMX_VCI_XXX.
1365 * @param pVCpu The cross context virtual CPU structure.
1366 * @param pVmcsInfo The VMCS info. object.
1367 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1368 */
1369static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1370{
1371 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1372
1373 uint32_t u32Val;
1374 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1375 AssertRC(rc);
1376 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1377 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1378 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1379 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1380
1381 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1382 AssertRC(rc);
1383 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1384 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1385 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1386 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1387
1388 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1389 AssertRC(rc);
1390 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1391 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1392 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1393 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1394
1395 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1396 AssertRC(rc);
1397 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1398 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1399 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1400 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1401
1402 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1403 {
1404 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1405 AssertRC(rc);
1406 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1407 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1408 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1409 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1410 }
1411
1412 uint64_t u64Val;
1413 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1414 {
1415 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1416 AssertRC(rc);
1417 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1418 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1419 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1420 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1421 }
1422
1423 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1424 AssertRC(rc);
1425 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1426 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1427 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1428 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1429
1430 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1431 AssertRC(rc);
1432 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1433 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1434 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1435 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1436
1437 NOREF(pcszVmcs);
1438 return VINF_SUCCESS;
1439}
1440
1441
1442/**
1443 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1444 * VMCS.
1445 *
1446 * This is typically required when the guest changes paging mode.
1447 *
1448 * @returns VBox status code.
1449 * @param pVCpu The cross context virtual CPU structure.
1450 * @param pVmxTransient The VMX-transient structure.
1451 *
1452 * @remarks Requires EFER.
1453 * @remarks No-long-jump zone!!!
1454 */
1455static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1456{
1457 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1458 {
1459 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1460 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1461
1462 /*
1463 * VM-entry controls.
1464 */
1465 {
1466 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1467 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1468
1469 /*
1470 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1471 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1472 *
1473 * For nested-guests, this is a mandatory VM-entry control. It's also
1474 * required because we do not want to leak host bits to the nested-guest.
1475 */
1476 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1477
1478 /*
1479 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1480 *
1481 * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1482 * required to get the nested-guest working with hardware-assisted VMX execution.
1483 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1484 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1485 * here rather than while merging the guest VMCS controls.
1486 */
1487 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1488 {
1489 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1490 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1491 }
1492 else
1493 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1494
1495 /*
1496 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use them.
1497 *
1498 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1499 * regardless of whether the nested-guest VMCS specifies it because we are free to
1500 * load whatever MSRs we require and we do not need to modify the guest visible copy
1501 * of the VM-entry MSR load area.
1502 */
1503 if ( g_fHmVmxSupportsVmcsEfer
1504#ifndef IN_NEM_DARWIN
1505 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1506#endif
1507 )
1508 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1509 else
1510 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1511
1512 /*
1513 * The following should -not- be set (since we're not in SMM mode):
1514 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1515 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1516 */
1517
1518 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1519 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1520
1521 if ((fVal & fZap) == fVal)
1522 { /* likely */ }
1523 else
1524 {
1525 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1526 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1527 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1528 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1529 }
1530
1531 /* Commit it to the VMCS. */
1532 if (pVmcsInfo->u32EntryCtls != fVal)
1533 {
1534 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1535 AssertRC(rc);
1536 pVmcsInfo->u32EntryCtls = fVal;
1537 }
1538 }
1539
1540 /*
1541 * VM-exit controls.
1542 */
1543 {
1544 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1545 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1546
1547 /*
1548 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1549 * supported the 1-setting of this bit.
1550 *
1551 * For nested-guests, we set the "save debug controls" control, as the converse
1552 * "load debug controls" control is mandatory for nested-guests anyway.
1553 */
1554 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1555
1556 /*
1557 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1558 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1559 * host EFER.LMA and EFER.LME bits to this value. See assertion in
1560 * vmxHCExportHostMsrs().
1561 *
1562 * For nested-guests, we always set this bit as we do not support 32-bit
1563 * hosts.
1564 */
1565 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1566
1567#ifndef IN_NEM_DARWIN
1568 /*
1569 * If the VMCS EFER MSR fields are supported by the hardware, we use them.
1570 *
1571 * For nested-guests, we should use the "save IA32_EFER" control if we also
1572 * used the "load IA32_EFER" control while exporting VM-entry controls.
1573 */
1574 if ( g_fHmVmxSupportsVmcsEfer
1575 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1576 {
1577 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1578 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1579 }
1580#endif
1581
1582 /*
1583 * Enable saving of the VMX-preemption timer value on VM-exit.
1584 * For nested-guests, currently not exposed/used.
1585 */
1586 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1587 * the timer value. */
1588 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1589 {
1590 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1591 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1592 }
1593
1594 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1595 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1596
1597 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1598 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1599 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1600
1601 if ((fVal & fZap) == fVal)
1602 { /* likely */ }
1603 else
1604 {
1605 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1606 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1607 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1608 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1609 }
1610
1611 /* Commit it to the VMCS. */
1612 if (pVmcsInfo->u32ExitCtls != fVal)
1613 {
1614 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1615 AssertRC(rc);
1616 pVmcsInfo->u32ExitCtls = fVal;
1617 }
1618 }
1619
1620 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1621 }
1622 return VINF_SUCCESS;
1623}
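
/*
 * Illustrative sketch (not part of the build): how the allowed0/allowed1 pair
 * reported by a VMX capability MSR constrains a 32-bit control field, mirroring
 * the fVal/fZap pattern used above. Bits set in allowed0 must be 1 in the VMCS,
 * bits clear in allowed1 must be 0. The helper name below is hypothetical.
 */
#if 0
static bool vmxSketchAdjustCtls(uint32_t fAllowed0, uint32_t fAllowed1, uint32_t fDesired, uint32_t *pfResult)
{
    uint32_t const fVal = fDesired | fAllowed0;     /* Force the must-be-one bits. */
    if ((fVal & fAllowed1) != fVal)                 /* A requested bit is must-be-zero on this CPU? */
        return false;                               /* Unsupported feature combination (VMX_UFC_CTRL_XXX). */
    *pfResult = fVal;
    return true;
}
#endif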
1624
1625
1626/**
1627 * Sets the TPR threshold in the VMCS.
1628 *
1629 * @param pVCpu The cross context virtual CPU structure.
1630 * @param pVmcsInfo The VMCS info. object.
1631 * @param u32TprThreshold The TPR threshold (task-priority class only).
1632 */
1633DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1634{
1635 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1636 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1637 RT_NOREF(pVmcsInfo);
1638 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1639 AssertRC(rc);
1640}
1641
1642
1643/**
1644 * Exports the guest APIC TPR state into the VMCS.
1645 *
1646 * @param pVCpu The cross context virtual CPU structure.
1647 * @param pVmxTransient The VMX-transient structure.
1648 *
1649 * @remarks No-long-jump zone!!!
1650 */
1651static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1652{
1653 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1654 {
1655 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1656
1657 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1658 if (!pVmxTransient->fIsNestedGuest)
1659 {
1660 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1661 && APICIsEnabled(pVCpu))
1662 {
1663 /*
1664 * Setup TPR shadowing.
1665 */
1666 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1667 {
1668 bool fPendingIntr = false;
1669 uint8_t u8Tpr = 0;
1670 uint8_t u8PendingIntr = 0;
1671 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1672 AssertRC(rc);
1673
1674 /*
1675 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1676 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1677 * priority of the pending interrupt so we can deliver the interrupt. If there
1678 * are no interrupts pending, set threshold to 0 to not cause any
1679 * TPR-below-threshold VM-exits.
1680 */
1681 uint32_t u32TprThreshold = 0;
1682 if (fPendingIntr)
1683 {
1684 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1685 (which is the Task-Priority Class). */
1686 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1687 const uint8_t u8TprPriority = u8Tpr >> 4;
1688 if (u8PendingPriority <= u8TprPriority)
1689 u32TprThreshold = u8PendingPriority;
1690 }
1691
1692 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1693 }
1694 }
1695 }
1696 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1697 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1698 }
1699}
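
/*
 * Illustrative sketch (not part of the build) of the TPR threshold computation
 * done above: both the TPR and the pending interrupt vector are reduced to
 * their task-priority class (bits 7:4), and the threshold is only raised when
 * the pending interrupt is currently masked by the TPR. The helper name is
 * hypothetical.
 */
#if 0
static uint32_t vmxSketchCalcTprThreshold(uint8_t u8Tpr, bool fPendingIntr, uint8_t u8PendingIntr)
{
    if (!fPendingIntr)
        return 0;                                           /* Nothing pending: no TPR-below-threshold exits wanted. */
    uint8_t const u8PendingPriority = u8PendingIntr >> 4;   /* Task-priority class of the pending interrupt. */
    uint8_t const u8TprPriority     = u8Tpr >> 4;           /* Task-priority class of the current TPR. */
    return u8PendingPriority <= u8TprPriority ? u8PendingPriority : 0;
}
#endif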
1700
1701
1702/**
1703 * Gets the guest interruptibility-state and updates related internal eflags
1704 * inhibition state.
1705 *
1706 * @returns Guest's interruptibility-state.
1707 * @param pVCpu The cross context virtual CPU structure.
1708 *
1709 * @remarks No-long-jump zone!!!
1710 */
1711static uint32_t vmxHCGetGuestIntrStateWithUpdate(PVMCPUCC pVCpu)
1712{
1713 uint32_t fIntrState;
1714
1715 /*
1716 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1717 */
1718 if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
1719 fIntrState = 0;
1720 else
1721 {
1722 /* If inhibition is active, RIP should've been imported from the VMCS already. */
1723 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1724
1725 if (CPUMIsInInterruptShadowAfterSs(&pVCpu->cpum.GstCtx))
1726 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1727 else
1728 {
1729 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1730
1731 /* Block-by-STI must not be set when interrupts are disabled. */
1732 AssertStmt(pVCpu->cpum.GstCtx.eflags.Bits.u1IF, fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
1733 }
1734 }
1735
1736 /*
1737 * Check if we should inhibit NMI delivery.
1738 */
1739 if (!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1740 { /* likely */ }
1741 else
1742 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1743
1744 /*
1745 * Validate.
1746 */
1747 /* We don't support block-by-SMI yet.*/
1748 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1749
1750 return fIntrState;
1751}
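
/*
 * For reference, the guest interruptibility-state bits assembled above are (per
 * the Intel SDM, "Guest Non-Register State"): bit 0 blocking by STI, bit 1
 * blocking by MOV SS, bit 2 blocking by SMI (not used here) and bit 3 blocking
 * by NMI, matching the VMX_VMCS_GUEST_INT_STATE_BLOCK_XXX values.
 */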
1752
1753
1754/**
1755 * Exports the exception intercepts required for guest execution in the VMCS.
1756 *
1757 * @param pVCpu The cross context virtual CPU structure.
1758 * @param pVmxTransient The VMX-transient structure.
1759 *
1760 * @remarks No-long-jump zone!!!
1761 */
1762static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1763{
1764 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1765 {
1766 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1767 if ( !pVmxTransient->fIsNestedGuest
1768 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1769 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1770 else
1771 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1772
1773 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1774 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1775 }
1776}
1777
1778
1779/**
1780 * Exports the guest's RIP into the guest-state area in the VMCS.
1781 *
1782 * @param pVCpu The cross context virtual CPU structure.
1783 *
1784 * @remarks No-long-jump zone!!!
1785 */
1786static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1787{
1788 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1789 {
1790 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1791
1792 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1793 AssertRC(rc);
1794
1795 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1796 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1797 }
1798}
1799
1800
1801/**
1802 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1803 *
1804 * @param pVCpu The cross context virtual CPU structure.
1805 * @param pVmxTransient The VMX-transient structure.
1806 *
1807 * @remarks No-long-jump zone!!!
1808 */
1809static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1810{
1811 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1812 {
1813 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1814
1815 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32 bits
1816 of RFLAGS are reserved (MBZ). We use bits 63:24 for internal purposes, so there is
1817 no need to assert this; the CPUMX86EFLAGS/CPUMX86RFLAGS union masks these off for us.
1818 Use 32-bit VMWRITE. */
1819 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
1820 Assert((fEFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
1821 AssertMsg(!(fEFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK)), ("%#x\n", fEFlags));
1822
1823#ifndef IN_NEM_DARWIN
1824 /*
1825 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1826 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1827 * can run the real-mode guest code under Virtual 8086 mode.
1828 */
1829 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1830 if (pVmcsInfo->RealMode.fRealOnV86Active)
1831 {
1832 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1833 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1834 Assert(!pVmxTransient->fIsNestedGuest);
1835 pVmcsInfo->RealMode.Eflags.u32 = fEFlags; /* Save the original eflags of the real-mode guest. */
1836 fEFlags |= X86_EFL_VM; /* Set the Virtual 8086 mode bit. */
1837 fEFlags &= ~X86_EFL_IOPL; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1838 }
1839#else
1840 RT_NOREF(pVmxTransient);
1841#endif
1842
1843 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags);
1844 AssertRC(rc);
1845
1846 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1847 Log4Func(("eflags=%#RX32\n", fEFlags));
1848 }
1849}
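
/*
 * A minimal sketch (not part of the build) of the real-on-v86 eflags fixup
 * performed above: the guest's real-mode eflags are saved for the VM-exit path,
 * the VM flag is set so VT-x runs the code in virtual-8086 mode, and IOPL is
 * forced to 0 so privileged instructions fault and can be handled. The helper
 * name is hypothetical.
 */
#if 0
static uint32_t vmxSketchRealOnV86Eflags(uint32_t fEFlags, uint32_t *pfSavedEflags)
{
    *pfSavedEflags = fEFlags;           /* Remember the original real-mode eflags. */
    fEFlags |= X86_EFL_VM;              /* Run under virtual-8086 mode. */
    fEFlags &= ~X86_EFL_IOPL;           /* IOPL 0, otherwise certain instructions won't fault. */
    return fEFlags;
}
#endif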
1850
1851
1852#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1853/**
1854 * Copies the nested-guest VMCS to the shadow VMCS.
1855 *
1856 * @returns VBox status code.
1857 * @param pVCpu The cross context virtual CPU structure.
1858 * @param pVmcsInfo The VMCS info. object.
1859 *
1860 * @remarks No-long-jump zone!!!
1861 */
1862static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1863{
1864 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1865 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1866
1867 /*
1868 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1869 * current VMCS, as we may try saving guest lazy MSRs.
1870 *
1871 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1872 * calling the import VMCS code which is currently performing the guest MSR reads
1873 * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
1874 * and the rest of the VMX leave session machinery.
1875 */
1876 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1877
1878 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1879 if (RT_SUCCESS(rc))
1880 {
1881 /*
1882 * Copy all guest read/write VMCS fields.
1883 *
1884 * We don't check for VMWRITE failures here for performance reasons and
1885 * because they are not expected to fail, barring irrecoverable conditions
1886 * like hardware errors.
1887 */
1888 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1889 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1890 {
1891 uint64_t u64Val;
1892 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1893 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1894 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1895 }
1896
1897 /*
1898 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1899 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1900 */
1901 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1902 {
1903 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1904 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1905 {
1906 uint64_t u64Val;
1907 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1908 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1909 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1910 }
1911 }
1912
1913 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1914 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1915 }
1916
1917 ASMSetFlags(fEFlags);
1918 return rc;
1919}
1920
1921
1922/**
1923 * Copies the shadow VMCS to the nested-guest VMCS.
1924 *
1925 * @returns VBox status code.
1926 * @param pVCpu The cross context virtual CPU structure.
1927 * @param pVmcsInfo The VMCS info. object.
1928 *
1929 * @remarks Called with interrupts disabled.
1930 */
1931static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1932{
1933 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1934 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1935 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1936
1937 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1938 if (RT_SUCCESS(rc))
1939 {
1940 /*
1941 * Copy guest read/write fields from the shadow VMCS.
1942 * Guest read-only fields cannot be modified, so no need to copy them.
1943 *
1944 * We don't check for VMREAD failures here for performance reasons and
1945 * because they are not expected to fail, barring irrecoverable conditions
1946 * like hardware errors.
1947 */
1948 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1949 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1950 {
1951 uint64_t u64Val;
1952 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1953 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1954 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1955 }
1956
1957 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1958 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1959 }
1960 return rc;
1961}
1962
1963
1964/**
1965 * Enables VMCS shadowing for the given VMCS info. object.
1966 *
1967 * @param pVCpu The cross context virtual CPU structure.
1968 * @param pVmcsInfo The VMCS info. object.
1969 *
1970 * @remarks No-long-jump zone!!!
1971 */
1972static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1973{
1974 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1975 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1976 {
1977 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1978 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1979 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1980 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1981 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1982 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
1983 Log4Func(("Enabled\n"));
1984 }
1985}
1986
1987
1988/**
1989 * Disables VMCS shadowing for the given VMCS info. object.
1990 *
1991 * @param pVCpu The cross context virtual CPU structure.
1992 * @param pVmcsInfo The VMCS info. object.
1993 *
1994 * @remarks No-long-jump zone!!!
1995 */
1996static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1997{
1998 /*
1999 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
2000 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
2001 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
2002 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
2003 *
2004 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2005 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2006 */
2007 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2008 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2009 {
2010 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2011 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2012 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2013 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2014 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2015 Log4Func(("Disabled\n"));
2016 }
2017}
2018#endif
2019
2020
2021/**
2022 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2023 *
2024 * The guest FPU state is always pre-loaded, hence we don't need to bother with
2025 * sharing FPU-related CR0 bits between the guest and host.
2026 *
2027 * @returns VBox status code.
2028 * @param pVCpu The cross context virtual CPU structure.
2029 * @param pVmxTransient The VMX-transient structure.
2030 *
2031 * @remarks No-long-jump zone!!!
2032 */
2033static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2034{
2035 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2036 {
2037 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2038 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2039
2040 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2041 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2042 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2043 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2044 else
2045 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
2046
2047 if (!pVmxTransient->fIsNestedGuest)
2048 {
2049 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2050 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2051 uint64_t const u64ShadowCr0 = u64GuestCr0;
2052 Assert(!RT_HI_U32(u64GuestCr0));
2053
2054 /*
2055 * Setup VT-x's view of the guest CR0.
2056 */
2057 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2058 if (VM_IS_VMX_NESTED_PAGING(pVM))
2059 {
2060#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2061 if (CPUMIsGuestPagingEnabled(pVCpu))
2062 {
2063 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2064 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2065 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2066 }
2067 else
2068 {
2069 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2070 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2071 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2072 }
2073
2074 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2075 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2076 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2077#endif
2078 }
2079 else
2080 {
2081 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2082 u64GuestCr0 |= X86_CR0_WP;
2083 }
2084
2085 /*
2086 * Guest FPU bits.
2087 *
2088 * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state
2089 * using CR0.TS.
2090 *
2091 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be
2092 * set on the first CPUs to support VT-x, and makes no mention of it with regards to UX in the VM-entry checks.
2093 */
2094 u64GuestCr0 |= X86_CR0_NE;
2095
2096 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2097 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2098
2099 /*
2100 * Update exception intercepts.
2101 */
2102 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2103#ifndef IN_NEM_DARWIN
2104 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2105 {
2106 Assert(PDMVmmDevHeapIsEnabled(pVM));
2107 Assert(pVM->hm.s.vmx.pRealModeTSS);
2108 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2109 }
2110 else
2111#endif
2112 {
2113 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2114 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2115 if (fInterceptMF)
2116 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2117 }
2118
2119 /* Additional intercepts for debugging; define these yourself explicitly. */
2120#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2121 uXcptBitmap |= 0
2122 | RT_BIT(X86_XCPT_BP)
2123 | RT_BIT(X86_XCPT_DE)
2124 | RT_BIT(X86_XCPT_NM)
2125 | RT_BIT(X86_XCPT_TS)
2126 | RT_BIT(X86_XCPT_UD)
2127 | RT_BIT(X86_XCPT_NP)
2128 | RT_BIT(X86_XCPT_SS)
2129 | RT_BIT(X86_XCPT_GP)
2130 | RT_BIT(X86_XCPT_PF)
2131 | RT_BIT(X86_XCPT_MF)
2132 ;
2133#elif defined(HMVMX_ALWAYS_TRAP_PF)
2134 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2135#endif
2136 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2137 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2138 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2139 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2140 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2141
2142 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2143 u64GuestCr0 |= fSetCr0;
2144 u64GuestCr0 &= fZapCr0;
2145 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2146
2147 Assert(!RT_HI_U32(u64GuestCr0));
2148 Assert(u64GuestCr0 & X86_CR0_NE);
2149
2150 /* Commit the CR0 and related fields to the guest VMCS. */
2151 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2152 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2153 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2154 {
2155 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2156 AssertRC(rc);
2157 }
2158 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2159 {
2160 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2161 AssertRC(rc);
2162 }
2163
2164 /* Update our caches. */
2165 pVmcsInfo->u32ProcCtls = uProcCtls;
2166 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2167
2168 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2169 }
2170 else
2171 {
2172 /*
2173 * With nested-guests, we may have extended the guest/host mask here since we
2174 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2175 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2176 * originally supplied. We must copy those bits from the nested-guest CR0 into
2177 * the nested-guest CR0 read-shadow.
2178 */
2179 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2180 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2181 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2182
2183 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2184 u64GuestCr0 |= fSetCr0;
2185 u64GuestCr0 &= fZapCr0;
2186 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2187
2188 Assert(!RT_HI_U32(u64GuestCr0));
2189 Assert(u64GuestCr0 & X86_CR0_NE);
2190
2191 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2192 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2193 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2194
2195 Log4Func(("cr0=%#RX64 shadow=%#RX64 vmcs_read_shw=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0,
2196 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u, fSetCr0, fZapCr0));
2197 }
2198
2199 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2200 }
2201
2202 return VINF_SUCCESS;
2203}
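
/*
 * Illustrative sketch (not part of the build) of the CR0 fixed-bits handling
 * above: fixed0 bits must be set, fixed1 bits must not be cleared, and with
 * unrestricted guest execution the PE/PG requirement is relaxed. The helper
 * name is hypothetical.
 */
#if 0
static uint64_t vmxSketchApplyCr0Fixed(uint64_t uGuestCr0, uint64_t fFixed0, uint64_t fFixed1, bool fUnrestrictedGuest)
{
    if (fUnrestrictedGuest)
        fFixed0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);    /* PE/PG may be 0 with unrestricted guest + EPT. */
    uGuestCr0 |= fFixed0;                                   /* Bits that must be one. */
    uGuestCr0 &= fFixed1;                                   /* Clear bits that must be zero. */
    return uGuestCr0;
}
#endif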
2204
2205
2206/**
2207 * Exports the guest control registers (CR3, CR4) into the guest-state area
2208 * in the VMCS.
2209 *
2210 * @returns VBox strict status code.
2211 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2212 * without unrestricted guest access and the VMMDev is not presently
2213 * mapped (e.g. EFI32).
2214 *
2215 * @param pVCpu The cross context virtual CPU structure.
2216 * @param pVmxTransient The VMX-transient structure.
2217 *
2218 * @remarks No-long-jump zone!!!
2219 */
2220static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2221{
2222 int rc = VINF_SUCCESS;
2223 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2224
2225 /*
2226 * Guest CR2.
2227 * It's always loaded in the assembler code. Nothing to do here.
2228 */
2229
2230 /*
2231 * Guest CR3.
2232 */
2233 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2234 {
2235 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2236
2237 if (VM_IS_VMX_NESTED_PAGING(pVM))
2238 {
2239#ifndef IN_NEM_DARWIN
2240 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2241 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2242
2243 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2244 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2245 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2246 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2247
2248 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2249 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2250 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
2251
2252 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2253 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2254 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2255 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2256 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2257 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2258 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2259
2260 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2261 AssertRC(rc);
2262#endif
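
            /*
             * For reference, the EPTP bits assembled and validated above are: bits 2:0
             * memory type (6 = write-back), bits 5:3 page-walk length minus one (3 = 4-level
             * walk), bit 6 enable accessed/dirty flags, bits 11:7 reserved here (asserted
             * MBZ above) and bits 51:12 the physical address of the EPT PML4 table.
             */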
2263
2264 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2265 uint64_t u64GuestCr3 = pCtx->cr3;
2266 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2267 || CPUMIsGuestPagingEnabledEx(pCtx))
2268 {
2269 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2270 if (CPUMIsGuestInPAEModeEx(pCtx))
2271 {
2272 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2273 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2274 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2275 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2276 }
2277
2278 /*
2279 * With nested paging, the guest's view of its CR3 is left unblemished: either the
2280 * guest is using paging, or we have unrestricted guest execution to handle the
2281 * guest while it's not using paging.
2282 */
2283 }
2284#ifndef IN_NEM_DARWIN
2285 else
2286 {
2287 /*
2288 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2289 * thinks it accesses physical memory directly, we use our identity-mapped
2290 * page table to map guest-linear to guest-physical addresses. EPT takes care
2291 * of translating it to host-physical addresses.
2292 */
2293 RTGCPHYS GCPhys;
2294 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2295
2296 /* We obtain it here every time as the guest could have relocated this PCI region. */
2297 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2298 if (RT_SUCCESS(rc))
2299 { /* likely */ }
2300 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2301 {
2302 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2303 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2304 }
2305 else
2306 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2307
2308 u64GuestCr3 = GCPhys;
2309 }
2310#endif
2311
2312 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2313 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2314 AssertRC(rc);
2315 }
2316 else
2317 {
2318 Assert(!pVmxTransient->fIsNestedGuest);
2319 /* Non-nested paging case, just use the hypervisor's CR3. */
2320 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2321
2322 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2323 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2324 AssertRC(rc);
2325 }
2326
2327 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2328 }
2329
2330 /*
2331 * Guest CR4.
2332 * ASSUMES this is done every time we get in from ring-3! (XCR0)
2333 */
2334 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2335 {
2336 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2337 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2338
2339 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2340 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2341
2342 /*
2343 * With nested-guests, we may have extended the guest/host mask here (since we
2344 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
2345 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2346 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2347 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2348 */
2349 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2350 uint64_t u64GuestCr4 = pCtx->cr4;
2351 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2352 ? pCtx->cr4
2353 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2354 Assert(!RT_HI_U32(u64GuestCr4));
2355
2356#ifndef IN_NEM_DARWIN
2357 /*
2358 * Setup VT-x's view of the guest CR4.
2359 *
2360 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2361 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2362 * redirection bitmap is already all 0, see hmR3InitFinalizeR0()).
2363 *
2364 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2365 */
2366 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2367 {
2368 Assert(pVM->hm.s.vmx.pRealModeTSS);
2369 Assert(PDMVmmDevHeapIsEnabled(pVM));
2370 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2371 }
2372#endif
2373
2374 if (VM_IS_VMX_NESTED_PAGING(pVM))
2375 {
2376 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2377 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2378 {
2379 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2380 u64GuestCr4 |= X86_CR4_PSE;
2381 /* Our identity mapping is a 32-bit page directory. */
2382 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2383 }
2384 /* else use guest CR4.*/
2385 }
2386 else
2387 {
2388 Assert(!pVmxTransient->fIsNestedGuest);
2389
2390 /*
2391 * The shadow paging modes and guest paging modes are different; the shadow is in accordance with the host
2392 * paging mode, and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2393 */
2394 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2395 {
2396 case PGMMODE_REAL: /* Real-mode. */
2397 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2398 case PGMMODE_32_BIT: /* 32-bit paging. */
2399 {
2400 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2401 break;
2402 }
2403
2404 case PGMMODE_PAE: /* PAE paging. */
2405 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2406 {
2407 u64GuestCr4 |= X86_CR4_PAE;
2408 break;
2409 }
2410
2411 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2412 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2413 {
2414#ifdef VBOX_WITH_64_BITS_GUESTS
2415 /* For our assumption in vmxHCShouldSwapEferMsr. */
2416 Assert(u64GuestCr4 & X86_CR4_PAE);
2417 break;
2418#endif
2419 }
2420 default:
2421 AssertFailed();
2422 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2423 }
2424 }
2425
2426 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2427 u64GuestCr4 |= fSetCr4;
2428 u64GuestCr4 &= fZapCr4;
2429
2430 Assert(!RT_HI_U32(u64GuestCr4));
2431 Assert(u64GuestCr4 & X86_CR4_VMXE);
2432
2433 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2434 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2435 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2436
2437#ifndef IN_NEM_DARWIN
2438 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2439 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2440 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2441 {
2442 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2443 hmR0VmxUpdateStartVmFunction(pVCpu);
2444 }
2445#endif
2446
2447 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2448
2449 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2450 }
2451 return rc;
2452}
2453
2454
2455#ifdef VBOX_STRICT
2456/**
2457 * Strict function to validate segment registers.
2458 *
2459 * @param pVCpu The cross context virtual CPU structure.
2460 * @param pVmcsInfo The VMCS info. object.
2461 *
2462 * @remarks Will import guest CR0 on strict builds during validation of
2463 * segments.
2464 */
2465static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2466{
2467 /*
2468 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2469 *
2470 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2471 * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2472 * unusable bit and doesn't change the guest-context value.
2473 */
2474 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2475 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2476 vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2477 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2478 && ( !CPUMIsGuestInRealModeEx(pCtx)
2479 && !CPUMIsGuestInV86ModeEx(pCtx)))
2480 {
2481 /* Protected mode checks */
2482 /* CS */
2483 Assert(pCtx->cs.Attr.n.u1Present);
2484 Assert(!(pCtx->cs.Attr.u & 0xf00));
2485 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2486 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2487 || !(pCtx->cs.Attr.n.u1Granularity));
2488 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2489 || (pCtx->cs.Attr.n.u1Granularity));
2490 /* CS cannot be loaded with NULL in protected mode. */
2491 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2492 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2493 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2494 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2495 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2496 else
2497 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2498 /* SS */
2499 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2500 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2501 if ( !(pCtx->cr0 & X86_CR0_PE)
2502 || pCtx->cs.Attr.n.u4Type == 3)
2503 {
2504 Assert(!pCtx->ss.Attr.n.u2Dpl);
2505 }
2506 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2507 {
2508 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2509 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2510 Assert(pCtx->ss.Attr.n.u1Present);
2511 Assert(!(pCtx->ss.Attr.u & 0xf00));
2512 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2513 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2514 || !(pCtx->ss.Attr.n.u1Granularity));
2515 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2516 || (pCtx->ss.Attr.n.u1Granularity));
2517 }
2518 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2519 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2520 {
2521 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2522 Assert(pCtx->ds.Attr.n.u1Present);
2523 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2524 Assert(!(pCtx->ds.Attr.u & 0xf00));
2525 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2526 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2527 || !(pCtx->ds.Attr.n.u1Granularity));
2528 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2529 || (pCtx->ds.Attr.n.u1Granularity));
2530 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2531 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2532 }
2533 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2534 {
2535 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2536 Assert(pCtx->es.Attr.n.u1Present);
2537 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2538 Assert(!(pCtx->es.Attr.u & 0xf00));
2539 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2540 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2541 || !(pCtx->es.Attr.n.u1Granularity));
2542 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2543 || (pCtx->es.Attr.n.u1Granularity));
2544 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2545 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2546 }
2547 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2548 {
2549 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2550 Assert(pCtx->fs.Attr.n.u1Present);
2551 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2552 Assert(!(pCtx->fs.Attr.u & 0xf00));
2553 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2554 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2555 || !(pCtx->fs.Attr.n.u1Granularity));
2556 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2557 || (pCtx->fs.Attr.n.u1Granularity));
2558 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2559 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2560 }
2561 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2562 {
2563 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2564 Assert(pCtx->gs.Attr.n.u1Present);
2565 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2566 Assert(!(pCtx->gs.Attr.u & 0xf00));
2567 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2568 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2569 || !(pCtx->gs.Attr.n.u1Granularity));
2570 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2571 || (pCtx->gs.Attr.n.u1Granularity));
2572 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2573 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2574 }
2575 /* 64-bit capable CPUs. */
2576 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2577 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2578 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2579 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2580 }
2581 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2582 || ( CPUMIsGuestInRealModeEx(pCtx)
2583 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2584 {
2585 /* Real and v86 mode checks. */
2586 /* vmxHCExportGuestSegReg() writes the modified value into the VMCS. We want what we're feeding to VT-x. */
2587 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2588#ifndef IN_NEM_DARWIN
2589 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2590 {
2591 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2592 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2593 }
2594 else
2595#endif
2596 {
2597 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2598 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2599 }
2600
2601 /* CS */
2602 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2603 Assert(pCtx->cs.u32Limit == 0xffff);
2604 AssertMsg(u32CSAttr == 0xf3, ("cs=%#x %#x ", pCtx->cs.Sel, u32CSAttr));
2605 /* SS */
2606 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2607 Assert(pCtx->ss.u32Limit == 0xffff);
2608 Assert(u32SSAttr == 0xf3);
2609 /* DS */
2610 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2611 Assert(pCtx->ds.u32Limit == 0xffff);
2612 Assert(u32DSAttr == 0xf3);
2613 /* ES */
2614 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2615 Assert(pCtx->es.u32Limit == 0xffff);
2616 Assert(u32ESAttr == 0xf3);
2617 /* FS */
2618 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2619 Assert(pCtx->fs.u32Limit == 0xffff);
2620 Assert(u32FSAttr == 0xf3);
2621 /* GS */
2622 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2623 Assert(pCtx->gs.u32Limit == 0xffff);
2624 Assert(u32GSAttr == 0xf3);
2625 /* 64-bit capable CPUs. */
2626 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2627 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2628 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2629 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2630 }
2631}
2632#endif /* VBOX_STRICT */
2633
2634
2635/**
2636 * Exports a guest segment register into the guest-state area in the VMCS.
2637 *
2638 * @returns VBox status code.
2639 * @param pVCpu The cross context virtual CPU structure.
2640 * @param pVmcsInfo The VMCS info. object.
2641 * @param iSegReg The segment register number (X86_SREG_XXX).
2642 * @param pSelReg Pointer to the segment selector.
2643 *
2644 * @remarks No-long-jump zone!!!
2645 */
2646static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2647{
2648 Assert(iSegReg < X86_SREG_COUNT);
2649
2650 uint32_t u32Access = pSelReg->Attr.u;
2651#ifndef IN_NEM_DARWIN
2652 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2653#endif
2654 {
2655 /*
2656 * The way to tell whether this is really a null selector or just a selector
2657 * loaded with 0 in real-mode is by using the segment attributes. A selector
2658 * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
2659 * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure
2660 * that NULL selectors loaded in protected-mode have their attribute as 0.
2661 */
2662 if (u32Access)
2663 { }
2664 else
2665 u32Access = X86DESCATTR_UNUSABLE;
2666 }
2667#ifndef IN_NEM_DARWIN
2668 else
2669 {
2670 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2671 u32Access = 0xf3;
2672 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2673 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2674 RT_NOREF_PV(pVCpu);
2675 }
2676#else
2677 RT_NOREF(pVmcsInfo);
2678#endif
2679
2680 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2681 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2682 ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2683
2684 /*
2685 * Commit it to the VMCS.
2686 */
2687 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2688 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2689 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2690 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2691 return VINF_SUCCESS;
2692}
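
/*
 * A tiny sketch (not part of the build) of the access-rights selection above: a
 * zero attribute value becomes the VT-x "unusable" encoding, while the
 * real-on-v86 hack forces 0xf3 (present, DPL 3, read/write data, accessed).
 * The helper name is hypothetical.
 */
#if 0
static uint32_t vmxSketchSegAccessRights(uint32_t uAttr, bool fRealOnV86Active)
{
    if (fRealOnV86Active)
        return 0xf3;                                 /* Override required by the real-using-v86 mode hack. */
    return uAttr ? uAttr : X86DESCATTR_UNUSABLE;     /* NULL selectors get only the unusable bit. */
}
#endif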
2693
2694
2695/**
2696 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2697 * area in the VMCS.
2698 *
2699 * @returns VBox status code.
2700 * @param pVCpu The cross context virtual CPU structure.
2701 * @param pVmxTransient The VMX-transient structure.
2702 *
2703 * @remarks Will import guest CR0 on strict builds during validation of
2704 * segments.
2705 * @remarks No-long-jump zone!!!
2706 */
2707static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2708{
2709 int rc = VERR_INTERNAL_ERROR_5;
2710#ifndef IN_NEM_DARWIN
2711 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2712#endif
2713 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2714 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2715#ifndef IN_NEM_DARWIN
2716 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2717#endif
2718
2719 /*
2720 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2721 */
2722 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2723 {
2724 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2725 {
2726 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2727#ifndef IN_NEM_DARWIN
2728 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2729 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2730#endif
2731 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2732 AssertRC(rc);
2733 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2734 }
2735
2736 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2737 {
2738 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2739#ifndef IN_NEM_DARWIN
2740 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2741 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2742#endif
2743 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2744 AssertRC(rc);
2745 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2746 }
2747
2748 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2749 {
2750 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2751#ifndef IN_NEM_DARWIN
2752 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2753 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2754#endif
2755 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2756 AssertRC(rc);
2757 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2758 }
2759
2760 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2761 {
2762 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2763#ifndef IN_NEM_DARWIN
2764 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2765 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2766#endif
2767 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2768 AssertRC(rc);
2769 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2770 }
2771
2772 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2773 {
2774 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2775#ifndef IN_NEM_DARWIN
2776 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2777 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2778#endif
2779 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2780 AssertRC(rc);
2781 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2782 }
2783
2784 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2785 {
2786 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2787#ifndef IN_NEM_DARWIN
2788 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2789 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2790#endif
2791 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2792 AssertRC(rc);
2793 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2794 }
2795
2796#ifdef VBOX_STRICT
2797 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2798#endif
2799 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2800 pCtx->cs.Attr.u));
2801 }
2802
2803 /*
2804 * Guest TR.
2805 */
2806 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2807 {
2808 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2809
2810 /*
2811 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2812 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2813 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2814 */
2815 uint16_t u16Sel;
2816 uint32_t u32Limit;
2817 uint64_t u64Base;
2818 uint32_t u32AccessRights;
2819#ifndef IN_NEM_DARWIN
2820 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2821#endif
2822 {
2823 u16Sel = pCtx->tr.Sel;
2824 u32Limit = pCtx->tr.u32Limit;
2825 u64Base = pCtx->tr.u64Base;
2826 u32AccessRights = pCtx->tr.Attr.u;
2827 }
2828#ifndef IN_NEM_DARWIN
2829 else
2830 {
2831 Assert(!pVmxTransient->fIsNestedGuest);
2832 Assert(pVM->hm.s.vmx.pRealModeTSS);
2833 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2834
2835 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2836 RTGCPHYS GCPhys;
2837 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2838 AssertRCReturn(rc, rc);
2839
2840 X86DESCATTR DescAttr;
2841 DescAttr.u = 0;
2842 DescAttr.n.u1Present = 1;
2843 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2844
2845 u16Sel = 0;
2846 u32Limit = HM_VTX_TSS_SIZE;
2847 u64Base = GCPhys;
2848 u32AccessRights = DescAttr.u;
2849 }
2850#endif
2851
2852 /* Validate. */
2853 Assert(!(u16Sel & RT_BIT(2)));
2854 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2855 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2856 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2857 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2858 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2859 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2860 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2861 Assert( (u32Limit & 0xfff) == 0xfff
2862 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2863 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2864 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2865
2866 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2867 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2868 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2869 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2870
2871 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2872 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2873 }
2874
2875 /*
2876 * Guest GDTR.
2877 */
2878 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2879 {
2880 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2881
2882 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2883 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2884
2885 /* Validate. */
2886 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2887
2888 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2889 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2890 }
2891
2892 /*
2893 * Guest LDTR.
2894 */
2895 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2896 {
2897 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2898
2899 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2900 uint32_t u32Access;
2901 if ( !pVmxTransient->fIsNestedGuest
2902 && !pCtx->ldtr.Attr.u)
2903 u32Access = X86DESCATTR_UNUSABLE;
2904 else
2905 u32Access = pCtx->ldtr.Attr.u;
2906
2907 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2908 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2909 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2910 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2911
2912 /* Validate. */
2913 if (!(u32Access & X86DESCATTR_UNUSABLE))
2914 {
2915 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2916 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2917 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2918 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2919 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2920 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2921 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2922 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2923 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2924 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2925 }
2926
2927 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2928 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2929 }
2930
2931 /*
2932 * Guest IDTR.
2933 */
2934 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2935 {
2936 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2937
2938 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2939 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2940
2941 /* Validate. */
2942 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2943
2944 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2945 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2946 }
2947
2948 return VINF_SUCCESS;
2949}
2950
2951
2952/**
2953 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2954 * VM-exit interruption info type.
2955 *
2956 * @returns The IEM exception flags.
2957 * @param uVector The event vector.
2958 * @param uVmxEventType The VMX event type.
2959 *
2960 * @remarks This function currently only constructs flags required for
2961 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2962 * and CR2 aspects of an exception are not included).
2963 */
2964static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2965{
2966 uint32_t fIemXcptFlags;
2967 switch (uVmxEventType)
2968 {
2969 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2970 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2971 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2972 break;
2973
2974 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2975 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2976 break;
2977
2978 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2979 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2980 break;
2981
2982 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2983 {
2984 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2985 if (uVector == X86_XCPT_BP)
2986 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2987 else if (uVector == X86_XCPT_OF)
2988 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
2989 else
2990 {
2991 fIemXcptFlags = 0;
2992 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
2993 }
2994 break;
2995 }
2996
2997 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
2998 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2999 break;
3000
3001 default:
3002 fIemXcptFlags = 0;
3003 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
3004 break;
3005 }
3006 return fIemXcptFlags;
3007}
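
/*
 * As an illustration of the mapping above (no new logic): a software exception
 * raised by INT3, i.e. VMX type SW_XCPT with vector X86_XCPT_BP, yields
 * IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR, whereas an NMI or a
 * hardware exception yields just IEM_XCPT_FLAGS_T_CPU_XCPT.
 */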
3008
3009
3010/**
3011 * Sets an event as a pending event to be injected into the guest.
3012 *
3013 * @param pVCpu The cross context virtual CPU structure.
3014 * @param u32IntInfo The VM-entry interruption-information field.
3015 * @param cbInstr The VM-entry instruction length in bytes (for
3016 * software interrupts, exceptions and privileged
3017 * software exceptions).
3018 * @param u32ErrCode The VM-entry exception error code.
3019 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3020 * page-fault.
3021 */
3022DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3023 RTGCUINTPTR GCPtrFaultAddress)
3024{
3025 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3026 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3027 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3028 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3029 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3030 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3031}
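
/*
 * A minimal sketch of how a page-fault (#PF) event could be queued with this
 * helper; uErrCode and GCPtrFaultAddress are placeholders, and the field macros
 * are the same ones used by the helpers below:
 *
 *     uint32_t const uIntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_PF)
 *                             | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
 *                             | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
 *                             | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
 *     vmxHCSetPendingEvent(pVCpu, uIntInfo, 0, uErrCode, GCPtrFaultAddress);
 */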
3032
3033
3034/**
3035 * Sets an external interrupt as pending-for-injection into the VM.
3036 *
3037 * @param pVCpu The cross context virtual CPU structure.
3038 * @param u8Interrupt The external interrupt vector.
3039 */
3040DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3041{
3042 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3043 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3044 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3045 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3046 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3047 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
3048}
3049
3050
3051/**
3052 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3053 *
3054 * @param pVCpu The cross context virtual CPU structure.
3055 */
3056DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3057{
3058 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3059 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3060 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3061 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3062 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3063 Log4Func(("NMI pending injection\n"));
3064}
3065
3066
3067/**
3068 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3069 *
3070 * @param pVCpu The cross context virtual CPU structure.
3071 */
3072DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3073{
3074 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3075 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3076 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3077 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3078 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3079}
3080
3081
3082/**
3083 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3084 *
3085 * @param pVCpu The cross context virtual CPU structure.
3086 */
3087DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3088{
3089 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3090 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3091 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3092 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3093 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3094}
3095
3096
3097/**
3098 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3099 *
3100 * @param pVCpu The cross context virtual CPU structure.
3101 */
3102DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3103{
3104 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3105 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3106 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3107 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3108 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3109}
3110
3111
3112#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3113/**
3114 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3115 *
3116 * @param pVCpu The cross context virtual CPU structure.
3117 * @param u32ErrCode The error code for the general-protection exception.
3118 */
3119DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3120{
3121 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3122 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3123 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3124 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3125 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3126}
3127
3128
3129/**
3130 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3131 *
3132 * @param pVCpu The cross context virtual CPU structure.
3133 * @param u32ErrCode The error code for the stack exception.
3134 */
3135DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3136{
3137 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3138 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3139 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3140 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3141 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3142}
3143#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3144
3145
3146/**
3147 * Fixes up attributes for the specified segment register.
3148 *
3149 * @param pVCpu The cross context virtual CPU structure.
3150 * @param pSelReg The segment register that needs fixing.
3151 * @param pszRegName The register name (for logging and assertions).
3152 */
3153static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3154{
3155 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3156
3157 /*
3158 * If VT-x marks the segment as unusable, most other bits remain undefined:
3159 * - For CS the L, D and G bits have meaning.
3160 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3161 * - For the remaining data segments no bits are defined.
3162 *
3163 * The present bit and the unusable bit have been observed to be set at the
3164 * same time (the selector was supposed to be invalid as we started executing
3165 * a V8086 interrupt in ring-0).
3166 *
3167 * What should be important for the rest of the VBox code is that the P bit is
3168 * cleared. Some of the other VBox code recognizes the unusable bit, but
3169 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3170 * safe side here, we'll strip off P and other bits we don't care about. If
3171 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3172 *
3173 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3174 */
3175#ifdef VBOX_STRICT
3176 uint32_t const uAttr = pSelReg->Attr.u;
3177#endif
3178
3179 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3180 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3181 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3182
3183#ifdef VBOX_STRICT
3184# ifndef IN_NEM_DARWIN
3185 VMMRZCallRing3Disable(pVCpu);
3186# endif
3187 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3188# ifdef DEBUG_bird
3189 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3190 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3191 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3192# endif
3193# ifndef IN_NEM_DARWIN
3194 VMMRZCallRing3Enable(pVCpu);
3195# endif
3196 NOREF(uAttr);
3197#endif
3198 RT_NOREF2(pVCpu, pszRegName);
3199}
3200
3201
3202/**
3203 * Imports a guest segment register from the current VMCS into the guest-CPU
3204 * context.
3205 *
3206 * @param pVCpu The cross context virtual CPU structure.
3207 * @tparam a_iSegReg The segment register number (X86_SREG_XXX).
3208 *
3209 * @remarks Called with interrupts and/or preemption disabled.
3210 */
3211template<uint32_t const a_iSegReg>
3212DECLINLINE(void) vmxHCImportGuestSegReg(PVMCPUCC pVCpu)
3213{
3214 AssertCompile(a_iSegReg < X86_SREG_COUNT);
3215 /* Check that the macros we depend upon here and in the exporting parent function work: */
3216#define MY_SEG_VMCS_FIELD(a_FieldPrefix, a_FieldSuff) \
3217 ( a_iSegReg == X86_SREG_ES ? a_FieldPrefix ## ES ## a_FieldSuff \
3218 : a_iSegReg == X86_SREG_CS ? a_FieldPrefix ## CS ## a_FieldSuff \
3219 : a_iSegReg == X86_SREG_SS ? a_FieldPrefix ## SS ## a_FieldSuff \
3220 : a_iSegReg == X86_SREG_DS ? a_FieldPrefix ## DS ## a_FieldSuff \
3221 : a_iSegReg == X86_SREG_FS ? a_FieldPrefix ## FS ## a_FieldSuff \
3222 : a_iSegReg == X86_SREG_GS ? a_FieldPrefix ## GS ## a_FieldSuff : 0)
3223 AssertCompile(VMX_VMCS_GUEST_SEG_BASE(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS_GUEST_,_BASE));
3224 AssertCompile(VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS16_GUEST_,_SEL));
3225 AssertCompile(VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_LIMIT));
3226 AssertCompile(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_ACCESS_RIGHTS));
3227
3228 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[a_iSegReg];
3229
3230 uint16_t u16Sel;
3231 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg), &u16Sel); AssertRC(rc);
3232 pSelReg->Sel = u16Sel;
3233 pSelReg->ValidSel = u16Sel;
3234
3235 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3236 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(a_iSegReg), &pSelReg->u64Base); AssertRC(rc);
3237
3238 uint32_t u32Attr;
3239 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg), &u32Attr); AssertRC(rc);
3240 pSelReg->Attr.u = u32Attr;
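    /* Note: the "ES\0CS\0..." expression below is a packed string of 3-byte
       ("XX\0") register names, so a_iSegReg * 3 selects the matching name;
       it is used purely for logging/assertions in the fix-up helper. */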
3241 if (u32Attr & X86DESCATTR_UNUSABLE)
3242 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + a_iSegReg * 3);
3243
3244 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3245}
3246
3247
3248/**
3249 * Imports the guest LDTR from the VMCS into the guest-CPU context.
3250 *
3251 * @param pVCpu The cross context virtual CPU structure.
3252 *
3253 * @remarks Called with interrupts and/or preemption disabled.
3254 */
3255DECL_FORCE_INLINE(void) vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3256{
3257 uint16_t u16Sel;
3258 uint64_t u64Base;
3259 uint32_t u32Limit, u32Attr;
3260 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3261 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3262 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3263 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3264
3265 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3266 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3267 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3268 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3269 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3270 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3271 if (u32Attr & X86DESCATTR_UNUSABLE)
3272 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3273}
3274
3275
3276/**
3277 * Imports the guest TR from the VMCS into the guest-CPU context.
3278 *
3279 * @param pVCpu The cross context virtual CPU structure.
3280 *
3281 * @remarks Called with interrupts and/or preemption disabled.
3282 */
3283DECL_FORCE_INLINE(void) vmxHCImportGuestTr(PVMCPUCC pVCpu)
3284{
3285 uint16_t u16Sel;
3286 uint64_t u64Base;
3287 uint32_t u32Limit, u32Attr;
3288 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3289 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3290 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3291 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3292
3293 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3294 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3295 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3296 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3297 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3298 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3299 /* TR is the only selector that can never be unusable. */
3300 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3301}
3302
3303
3304/**
3305 * Core: Imports the guest RIP from the VMCS into the guest-CPU context.
3306 *
3307 * @returns The RIP value.
3308 * @param pVCpu The cross context virtual CPU structure.
3309 *
3310 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3311 * @remarks Do -not- call this function directly!
3312 */
3313DECL_FORCE_INLINE(uint64_t) vmxHCImportGuestCoreRip(PVMCPUCC pVCpu)
3314{
3315 uint64_t u64Val;
3316 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3317 AssertRC(rc);
3318
3319 pVCpu->cpum.GstCtx.rip = u64Val;
3320
3321 return u64Val;
3322}
3323
3324
3325/**
3326 * Imports the guest RIP from the VMCS into the guest-CPU context.
3327 *
3328 * @param pVCpu The cross context virtual CPU structure.
3329 *
3330 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3331 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3332 * instead!!!
3333 */
3334DECL_FORCE_INLINE(void) vmxHCImportGuestRip(PVMCPUCC pVCpu)
3335{
3336 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
3337 {
3338 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3339 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3340 }
3341}
3342
3343
3344/**
3345 * Core: Imports the guest RFLAGS from the VMCS into the guest-CPU context.
3346 *
3347 * @param pVCpu The cross context virtual CPU structure.
3348 * @param pVmcsInfo The VMCS info. object.
3349 *
3350 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3351 * @remarks Do -not- call this function directly!
3352 */
3353DECL_FORCE_INLINE(void) vmxHCImportGuestCoreRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3354{
3355 uint64_t fRFlags;
3356 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &fRFlags);
3357 AssertRC(rc);
3358
3359 Assert((fRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
3360 Assert((fRFlags & ~(uint64_t)(X86_EFL_1 | X86_EFL_LIVE_MASK)) == 0);
3361
3362 pVCpu->cpum.GstCtx.rflags.u = fRFlags;
3363#ifndef IN_NEM_DARWIN
3364 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3365 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
3366 { /* most likely */ }
3367 else
3368 {
3369 pVCpu->cpum.GstCtx.eflags.Bits.u1VM = 0;
3370 pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3371 }
3372#else
3373 RT_NOREF(pVmcsInfo);
3374#endif
3375}
3376
3377
3378/**
3379 * Imports the guest RFLAGS from the VMCS into the guest-CPU context.
3380 *
3381 * @param pVCpu The cross context virtual CPU structure.
3382 * @param pVmcsInfo The VMCS info. object.
3383 *
3384 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3385 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3386 * instead!!!
3387 */
3388DECL_FORCE_INLINE(void) vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3389{
3390 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RFLAGS)
3391 {
3392 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3393 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3394 }
3395}
3396
3397
3398#ifndef IN_NEM_DARWIN
3399/**
3400 * Imports the guest TSC_AUX and certain other MSRs from the VMCS into the guest-CPU
3401 * context.
3402 *
3403 * The other MSRs are in the VM-exit MSR-store.
3404 *
3405 * @returns VBox status code.
3406 * @param pVCpu The cross context virtual CPU structure.
3407 * @param pVmcsInfo The VMCS info. object.
3408 * @param fEFlags Saved EFLAGS for restoring the interrupt flag (in case of
3409 * unexpected errors). Ignored in NEM/darwin context.
3410 */
3411DECL_FORCE_INLINE(int) vmxHCImportGuestTscAuxAndOtherMsrs(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3412{
3413 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3414 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3415 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3416 Assert(pMsrs);
3417 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3418 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3419 for (uint32_t i = 0; i < cMsrs; i++)
3420 {
3421 uint32_t const idMsr = pMsrs[i].u32Msr;
3422 switch (idMsr)
3423 {
3424 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3425 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3426 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3427 default:
3428 {
3429 uint32_t idxLbrMsr;
3430 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3431 if (VM_IS_VMX_LBR(pVM))
3432 {
3433 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3434 {
3435 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3436 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3437 break;
3438 }
3439 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3440 {
3441 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3442 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3443 break;
3444 }
3445 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3446 {
3447 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3448 break;
3449 }
3450 /* Fallthru (no break) */
3451 }
3452 pVCpu->cpum.GstCtx.fExtrn = 0;
3453 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3454 ASMSetFlags(fEFlags);
3455 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3456 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3457 }
3458 }
3459 }
3460 return VINF_SUCCESS;
3461}
3462#endif /* !IN_NEM_DARWIN */
3463
3464
3465/**
3466 * Imports the guest CR0 from the VMCS into the guest-CPU context.
3467 *
3468 * @param pVCpu The cross context virtual CPU structure.
3469 * @param pVmcsInfo The VMCS info. object.
3470 */
3471DECL_FORCE_INLINE(void) vmxHCImportGuestCr0(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3472{
3473 uint64_t u64Cr0;
3474 uint64_t u64Shadow;
3475 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3476 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3477#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3478 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3479 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3480#else
3481 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
3482 {
3483 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3484 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3485 }
3486 else
3487 {
3488 /*
3489 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3490 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3491 * re-construct CR0. See @bugref{9180#c95} for details.
3492 */
3493 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3494 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3495 u64Cr0 = (u64Cr0 & ~(pVmcsInfoGst->u64Cr0Mask & pVmcsNstGst->u64Cr0Mask.u))
3496 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3497 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3498 Assert(u64Cr0 & X86_CR0_NE);
3499 }
3500#endif
3501
3502#ifndef IN_NEM_DARWIN
3503 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3504#endif
3505 CPUMSetGuestCR0(pVCpu, u64Cr0);
3506#ifndef IN_NEM_DARWIN
3507 VMMRZCallRing3Enable(pVCpu);
3508#endif
3509}
3510
3511
3512/**
3513 * Imports the guest CR3 from the VMCS into the guest-CPU context.
3514 *
3515 * @param pVCpu The cross context virtual CPU structure.
3516 */
3517DECL_FORCE_INLINE(void) vmxHCImportGuestCr3(PVMCPUCC pVCpu)
3518{
3519 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3520 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3521
3522 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3523 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3524 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3525 && CPUMIsGuestPagingEnabledEx(pCtx)))
3526 {
3527 uint64_t u64Cr3;
3528 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3529 if (pCtx->cr3 != u64Cr3)
3530 {
3531 pCtx->cr3 = u64Cr3;
3532 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3533 }
3534
3535 /*
3536 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3537 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3538 */
3539 if (CPUMIsGuestInPAEModeEx(pCtx))
3540 {
3541 X86PDPE aPaePdpes[4];
3542 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3543 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3544 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3545 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3546 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3547 {
3548 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3549 /* PGM now updates PAE PDPTEs while updating CR3. */
3550 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3551 }
3552 }
3553 }
3554}
3555
3556
3557/**
3558 * Imports the guest CR4 from the VMCS into the guest-CPU context.
3559 *
3560 * @param pVCpu The cross context virtual CPU structure.
3561 * @param pVmcsInfo The VMCS info. object.
3562 */
3563DECL_FORCE_INLINE(void) vmxHCImportGuestCr4(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3564{
3565 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3566 uint64_t u64Cr4;
3567 uint64_t u64Shadow;
3568 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3569 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3570#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3571 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3572 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3573#else
3574 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3575 {
3576 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3577 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3578 }
3579 else
3580 {
3581 /*
3582 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3583 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3584 * re-construct CR4. See @bugref{9180#c95} for details.
3585 */
3586 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3587 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3588 u64Cr4 = (u64Cr4 & ~(pVmcsInfo->u64Cr4Mask & pVmcsNstGst->u64Cr4Mask.u))
3589 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3590 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3591 Assert(u64Cr4 & X86_CR4_VMXE);
3592 }
3593#endif
3594 pCtx->cr4 = u64Cr4;
3595}
3596
3597
3598/**
3599 * Worker for vmxHCImportGuestIntrState that handles the case where any of the
3600 * relevant VMX_VMCS32_GUEST_INT_STATE bits are set.
3601 */
3602DECL_NO_INLINE(static,void) vmxHCImportGuestIntrStateSlow(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fGstIntState)
3603{
3604 /*
3605 * We must import RIP here to set our EM interrupt-inhibited state.
3606 * We also import RFLAGS as our code that evaluates pending interrupts
3607 * before VM-entry requires it.
3608 */
3609 vmxHCImportGuestRip(pVCpu);
3610 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3611
3612 CPUMUpdateInterruptShadowSsStiEx(&pVCpu->cpum.GstCtx,
3613 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
3614 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
3615 pVCpu->cpum.GstCtx.rip);
3616 CPUMUpdateInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx, RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
3617}
3618
3619
3620/**
3621 * Imports the guest interruptibility-state from the VMCS into the guest-CPU
3622 * context.
3623 *
3624 * @note May import RIP and RFLAGS if interrupt or NMI are blocked.
3625 *
3626 * @param pVCpu The cross context virtual CPU structure.
3627 * @param pVmcsInfo The VMCS info. object.
3628 *
3629 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3630 * do not log!
3631 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3632 * instead!!!
3633 */
3634DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3635{
3636 uint32_t u32Val;
3637 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3638 Assert((u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
3639 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
3640 if (!u32Val)
3641 {
3642 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
3643 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
3644 }
3645 else
3646 vmxHCImportGuestIntrStateSlow(pVCpu, pVmcsInfo, u32Val);
3647}
3648
3649
3650/**
3651 * Worker for VMXR0ImportStateOnDemand.
3652 *
3653 * @returns VBox status code.
3654 * @param pVCpu The cross context virtual CPU structure.
3655 * @param pVmcsInfo The VMCS info. object.
3656 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3657 */
3658static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3659{
3660 int rc = VINF_SUCCESS;
3661 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3662 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3663 uint32_t u32Val;
3664
3665 /*
3666 * Note! This is a hack to work around a mysterious BSOD observed with release builds
3667 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3668 * neither are other host platforms.
3669 *
3670 * Committing this temporarily as it prevents BSOD.
3671 *
3672 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3673 */
3674#ifdef RT_OS_WINDOWS
3675 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3676 return VERR_HM_IPE_1;
3677#endif
3678
3679 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3680
3681#ifndef IN_NEM_DARWIN
3682 /*
3683 * We disable interrupts to make the updating of the state and in particular
3684 * the fExtrn modification atomic wrt preemption hooks.
3685 */
3686 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3687#endif
3688
3689 fWhat &= pCtx->fExtrn;
3690 if (fWhat)
3691 {
3692 do
3693 {
3694 if (fWhat & CPUMCTX_EXTRN_RIP)
3695 vmxHCImportGuestRip(pVCpu);
3696
3697 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3698 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3699
3700 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3701 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3702 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3703
3704 if (fWhat & CPUMCTX_EXTRN_RSP)
3705 {
3706 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3707 AssertRC(rc);
3708 }
3709
3710 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3711 {
3712 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3713#ifndef IN_NEM_DARWIN
3714 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3715#else
3716 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3717#endif
3718 if (fWhat & CPUMCTX_EXTRN_CS)
3719 {
3720 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3721 vmxHCImportGuestRip(pVCpu); /** @todo WTF? */
3722 if (fRealOnV86Active)
3723 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3724 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3725 }
3726 if (fWhat & CPUMCTX_EXTRN_SS)
3727 {
3728 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3729 if (fRealOnV86Active)
3730 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3731 }
3732 if (fWhat & CPUMCTX_EXTRN_DS)
3733 {
3734 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3735 if (fRealOnV86Active)
3736 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3737 }
3738 if (fWhat & CPUMCTX_EXTRN_ES)
3739 {
3740 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3741 if (fRealOnV86Active)
3742 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3743 }
3744 if (fWhat & CPUMCTX_EXTRN_FS)
3745 {
3746 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3747 if (fRealOnV86Active)
3748 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3749 }
3750 if (fWhat & CPUMCTX_EXTRN_GS)
3751 {
3752 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3753 if (fRealOnV86Active)
3754 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3755 }
3756 }
3757
3758 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3759 {
3760 if (fWhat & CPUMCTX_EXTRN_LDTR)
3761 vmxHCImportGuestLdtr(pVCpu);
3762
3763 if (fWhat & CPUMCTX_EXTRN_GDTR)
3764 {
3765 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3766 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3767 pCtx->gdtr.cbGdt = u32Val;
3768 }
3769
3770 /* Guest IDTR. */
3771 if (fWhat & CPUMCTX_EXTRN_IDTR)
3772 {
3773 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3774 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3775 pCtx->idtr.cbIdt = u32Val;
3776 }
3777
3778 /* Guest TR. */
3779 if (fWhat & CPUMCTX_EXTRN_TR)
3780 {
3781#ifndef IN_NEM_DARWIN
3782 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3783 so we don't need to import that one. */
3784 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3785#endif
3786 vmxHCImportGuestTr(pVCpu);
3787 }
3788 }
3789
3790 if (fWhat & CPUMCTX_EXTRN_DR7)
3791 {
3792#ifndef IN_NEM_DARWIN
3793 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3794#endif
3795 {
3796 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3797 AssertRC(rc);
3798 }
3799 }
3800
3801 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3802 {
3803 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3804 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3805 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3806 pCtx->SysEnter.cs = u32Val;
3807 }
3808
3809#ifndef IN_NEM_DARWIN
3810 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3811 {
3812 if ( pVM->hmr0.s.fAllow64BitGuests
3813 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3814 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3815 }
3816
3817 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3818 {
3819 if ( pVM->hmr0.s.fAllow64BitGuests
3820 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3821 {
3822 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3823 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3824 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3825 }
3826 }
3827
3828 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3829 {
3830 rc = vmxHCImportGuestTscAuxAndOtherMsrs(pVCpu, pVmcsInfo, fEFlags);
3831 AssertRCReturn(rc, rc);
3832 }
3833#else
3834 NOREF(pVM);
3835#endif
3836
3837 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3838 {
3839 if (fWhat & CPUMCTX_EXTRN_CR0)
3840 vmxHCImportGuestCr0(pVCpu, pVmcsInfo);
3841
3842 if (fWhat & CPUMCTX_EXTRN_CR4)
3843 vmxHCImportGuestCr4(pVCpu, pVmcsInfo);
3844
3845 if (fWhat & CPUMCTX_EXTRN_CR3)
3846 vmxHCImportGuestCr3(pVCpu);
3847 }
3848
3849#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3850 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3851 {
3852 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3853 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3854 {
3855 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3856 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3857 if (RT_SUCCESS(rc))
3858 { /* likely */ }
3859 else
3860 break;
3861 }
3862 }
3863#endif
3864 } while (0);
3865
3866 if (RT_SUCCESS(rc))
3867 {
3868 /* Update fExtrn. */
3869 pCtx->fExtrn &= ~fWhat;
3870
3871 /* If everything has been imported, clear the HM keeper bit. */
3872 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3873 {
3874#ifndef IN_NEM_DARWIN
3875 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3876#else
3877 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3878#endif
3879 Assert(!pCtx->fExtrn);
3880 }
3881 }
3882 }
3883#ifndef IN_NEM_DARWIN
3884 else
3885 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3886
3887 /*
3888 * Restore interrupts.
3889 */
3890 ASMSetFlags(fEFlags);
3891#endif
3892
3893 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3894
3895 if (RT_SUCCESS(rc))
3896 { /* likely */ }
3897 else
3898 return rc;
3899
3900 /*
3901 * Honor any pending CR3 updates.
3902 *
3903 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3904 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3905 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3906 *
3907 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3908 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3909 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3910 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3911 *
3912 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3913 *
3914 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3915 */
3916 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3917#ifndef IN_NEM_DARWIN
3918 && VMMRZCallRing3IsEnabled(pVCpu)
3919#endif
3920 )
3921 {
3922 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3923 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3924 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3925 }
3926
3927 return VINF_SUCCESS;
3928}
3929
3930
3931/**
3932 * Internal state fetcher, inner version where we fetch all of a_fWhat.
3933 *
3934 * @returns VBox status code.
3935 * @param pVCpu The cross context virtual CPU structure.
3936 * @param pVmcsInfo The VMCS info. object.
3937 * @param fEFlags Saved EFLAGS for restoring the interrupt flag. Ignored
3938 * in NEM/darwin context.
3939 * @tparam a_fWhat What to import, zero or more bits from
3940 * HMVMX_CPUMCTX_EXTRN_ALL.
3941 */
3942template<uint64_t const a_fWhat>
3943static int vmxHCImportGuestStateInner(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3944{
3945 Assert(a_fWhat != 0); /* No AssertCompile as the assertion probably kicks in before the compiler (clang) discards it. */
3946 AssertCompile(!(a_fWhat & ~HMVMX_CPUMCTX_EXTRN_ALL));
3947 Assert( (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == a_fWhat
3948 || (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == (a_fWhat & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
3949
3950 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3951
3952 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3953
3954 /* RIP and RFLAGS may have been imported already by the post-exit code
3955 together with the CPUMCTX_EXTRN_INHIBIT_INT/NMI state, in which case
3956 the fExtrn check below lets us skip re-reading them here. */
3957 if ( (a_fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3958 && pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3959 {
3960 if (a_fWhat & CPUMCTX_EXTRN_RFLAGS)
3961 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3962
3963 if (a_fWhat & CPUMCTX_EXTRN_RIP)
3964 {
3965 if (!(a_fWhat & CPUMCTX_EXTRN_CS))
3966 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3967 else
3968 vmxHCImportGuestCoreRip(pVCpu);
3969 }
3970 }
3971
3972 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3973 if (a_fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3974 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3975
3976 if (a_fWhat & (CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TR))
3977 {
3978 if (a_fWhat & CPUMCTX_EXTRN_CS)
3979 {
3980 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3981 /** @todo try get rid of this carp, it smells and is probably never ever
3982 * used: */
3983 if ( !(a_fWhat & CPUMCTX_EXTRN_RIP)
3984 && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP))
3985 {
3986 vmxHCImportGuestCoreRip(pVCpu);
3987 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3988 }
3989 EMHistoryUpdatePC(pVCpu, pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, true /* fFlattened */);
3990 }
3991 if (a_fWhat & CPUMCTX_EXTRN_SS)
3992 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3993 if (a_fWhat & CPUMCTX_EXTRN_DS)
3994 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3995 if (a_fWhat & CPUMCTX_EXTRN_ES)
3996 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3997 if (a_fWhat & CPUMCTX_EXTRN_FS)
3998 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3999 if (a_fWhat & CPUMCTX_EXTRN_GS)
4000 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
4001
4002 /* Guest TR.
4003 Real-mode emulation using virtual-8086 mode has the fake TSS
4004 (pRealModeTSS) in TR, so we don't need to import that one. */
4005#ifndef IN_NEM_DARWIN
4006 PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmcsInfo->pShared;
4007 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
4008 if ((a_fWhat & CPUMCTX_EXTRN_TR) && !fRealOnV86Active)
4009#else
4010 if (a_fWhat & CPUMCTX_EXTRN_TR)
4011#endif
4012 vmxHCImportGuestTr(pVCpu);
4013
4014#ifndef IN_NEM_DARWIN /* NEM/Darwin: HV supports only unrestricted guest execution. */
4015 if (fRealOnV86Active)
4016 {
4017 if (a_fWhat & CPUMCTX_EXTRN_CS)
4018 pVCpu->cpum.GstCtx.cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
4019 if (a_fWhat & CPUMCTX_EXTRN_SS)
4020 pVCpu->cpum.GstCtx.ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
4021 if (a_fWhat & CPUMCTX_EXTRN_DS)
4022 pVCpu->cpum.GstCtx.ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
4023 if (a_fWhat & CPUMCTX_EXTRN_ES)
4024 pVCpu->cpum.GstCtx.es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
4025 if (a_fWhat & CPUMCTX_EXTRN_FS)
4026 pVCpu->cpum.GstCtx.fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
4027 if (a_fWhat & CPUMCTX_EXTRN_GS)
4028 pVCpu->cpum.GstCtx.gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
4029 }
4030#endif
4031 }
4032
4033 if (a_fWhat & CPUMCTX_EXTRN_RSP)
4034 {
4035 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pVCpu->cpum.GstCtx.rsp);
4036 AssertRC(rc);
4037 }
4038
4039 if (a_fWhat & CPUMCTX_EXTRN_LDTR)
4040 vmxHCImportGuestLdtr(pVCpu);
4041
4042 if (a_fWhat & CPUMCTX_EXTRN_GDTR)
4043 {
4044 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pVCpu->cpum.GstCtx.gdtr.pGdt); AssertRC(rc1);
4045 uint32_t u32Val;
4046 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc2);
4047 pVCpu->cpum.GstCtx.gdtr.cbGdt = (uint16_t)u32Val;
4048 }
4049
4050 /* Guest IDTR. */
4051 if (a_fWhat & CPUMCTX_EXTRN_IDTR)
4052 {
4053 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pVCpu->cpum.GstCtx.idtr.pIdt); AssertRC(rc1);
4054 uint32_t u32Val;
4055 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc2);
4056 pVCpu->cpum.GstCtx.idtr.cbIdt = (uint64_t)u32Val;
4057 }
4058
4059 if (a_fWhat & CPUMCTX_EXTRN_DR7)
4060 {
4061#ifndef IN_NEM_DARWIN
4062 if (!pVCpu->hmr0.s.fUsingHyperDR7)
4063#endif
4064 {
4065 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pVCpu->cpum.GstCtx.dr[7]);
4066 AssertRC(rc);
4067 }
4068 }
4069
4070 if (a_fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
4071 {
4072 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pVCpu->cpum.GstCtx.SysEnter.eip); AssertRC(rc1);
4073 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pVCpu->cpum.GstCtx.SysEnter.esp); AssertRC(rc2);
4074 uint32_t u32Val;
4075 int const rc3 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc3);
4076 pVCpu->cpum.GstCtx.SysEnter.cs = u32Val;
4077 }
4078
4079#ifndef IN_NEM_DARWIN
4080 if (a_fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
4081 {
4082 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4083 && pVM->hmr0.s.fAllow64BitGuests)
4084 pVCpu->cpum.GstCtx.msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
4085 }
4086
4087 if (a_fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
4088 {
4089 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4090 && pVM->hmr0.s.fAllow64BitGuests)
4091 {
4092 pVCpu->cpum.GstCtx.msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
4093 pVCpu->cpum.GstCtx.msrSTAR = ASMRdMsr(MSR_K6_STAR);
4094 pVCpu->cpum.GstCtx.msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
4095 }
4096 }
4097
4098 if (a_fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
4099 {
4100 int const rc1 = vmxHCImportGuestTscAuxAndOtherMsrs(pVCpu, pVmcsInfo, fEFlags);
4101 AssertRCReturn(rc1, rc1);
4102 }
4103#else
4104 NOREF(pVM);
4105#endif
4106
4107 if (a_fWhat & CPUMCTX_EXTRN_CR0)
4108 vmxHCImportGuestCr0(pVCpu, pVmcsInfo);
4109
4110 if (a_fWhat & CPUMCTX_EXTRN_CR4)
4111 vmxHCImportGuestCr4(pVCpu, pVmcsInfo);
4112
4113 if (a_fWhat & CPUMCTX_EXTRN_CR3)
4114 vmxHCImportGuestCr3(pVCpu);
4115
4116#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4117 if (a_fWhat & CPUMCTX_EXTRN_HWVIRT)
4118 {
4119 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4120 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4121 {
4122 Assert(CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx));
4123 int const rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
4124 AssertRCReturn(rc, rc);
4125 }
4126 }
4127#endif
4128
4129 /* Update fExtrn. */
4130 pVCpu->cpum.GstCtx.fExtrn &= ~a_fWhat;
4131
4132 /* If everything has been imported, clear the HM keeper bit. */
4133 if (!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
4134 {
4135#ifndef IN_NEM_DARWIN
4136 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
4137#else
4138 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
4139#endif
4140 Assert(!pVCpu->cpum.GstCtx.fExtrn);
4141 }
4142
4143 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
4144
4145 /*
4146 * Honor any pending CR3 updates.
4147 *
4148 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
4149 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
4150 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
4151 *
4152 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
4153 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
4154 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
4155 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
4156 *
4157 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
4158 *
4159 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
4160 */
4161#ifndef IN_NEM_DARWIN
4162 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4163 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu))
4164 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu) )
4165 return VINF_SUCCESS;
4166 ASMSetFlags(fEFlags);
4167#else
4168 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4169 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
4170 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) )
4171 return VINF_SUCCESS;
4172 RT_NOREF_PV(fEFlags);
4173#endif
4174
4175 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
4176 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
4177 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
4178 return VINF_SUCCESS;
4179}
4180
4181
4182/**
4183 * Internal state fetcher.
4184 *
4185 * @returns VBox status code.
4186 * @param pVCpu The cross context virtual CPU structure.
4187 * @param pVmcsInfo The VMCS info. object.
4188 * @param pszCaller For logging.
4189 * @tparam a_fWhat What needs to be imported, CPUMCTX_EXTRN_XXX.
4190 * @tparam a_fDoneLocal What's ASSUMED to have been retrieved locally
4191 * already. This is ORed together with @a a_fWhat when
4192 * calculating what needs fetching (just for safety).
4193 * @tparam a_fDonePostExit What's ASSUMED to have been retrieved by
4194 * hmR0VmxPostRunGuest()/nemR3DarwinHandleExitCommon()
4195 * already. This is ORed together with @a a_fWhat when
4196 * calculating what needs fetching (just for safety).
4197 */
4198template<uint64_t const a_fWhat,
4199 uint64_t const a_fDoneLocal = 0,
4200 uint64_t const a_fDonePostExit = 0
4201#ifndef IN_NEM_DARWIN
4202 | CPUMCTX_EXTRN_INHIBIT_INT
4203 | CPUMCTX_EXTRN_INHIBIT_NMI
4204# if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
4205 | HMVMX_CPUMCTX_EXTRN_ALL
4206# elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
4207 | CPUMCTX_EXTRN_RFLAGS
4208# endif
4209#else /* IN_NEM_DARWIN */
4210 | CPUMCTX_EXTRN_ALL /** @todo optimize */
4211#endif /* IN_NEM_DARWIN */
4212>
4213DECLINLINE(int) vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, const char *pszCaller)
4214{
4215 RT_NOREF_PV(pszCaller);
4216 if ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL)
4217 {
4218#ifndef IN_NEM_DARWIN
4219 /*
4220 * We disable interrupts to make the updating of the state and in particular
4221 * the fExtrn modification atomic wrt preemption hooks.
4222 */
4223 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
4224#else
4225 RTCCUINTREG const fEFlags = 0;
4226#endif
4227
4228 /*
4229 * We combine all three parameters and take the (probably) inlined optimized
4230 * code path for the new things specified in a_fWhat.
4231 *
4232 * As a tweak to deal with exits that have INHIBIT_INT/NMI active, causing
4233 * vmxHCImportGuestIntrState to automatically fetch both RIP & RFLAGS, we
4234 * also take the streamlined path when both of these are cleared in fExtrn
4235 * already. vmxHCImportGuestStateInner checks fExtrn before fetching. This
4236 * helps with MWAIT and HLT exits that always inhibit IRQs on many platforms.
4237 */
4238 uint64_t const fWhatToDo = pVCpu->cpum.GstCtx.fExtrn
4239 & ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL);
4240 if (RT_LIKELY( ( fWhatToDo == (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit))
4241 || fWhatToDo == ( a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)
4242 & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)) /* fetch with INHIBIT_INT/NMI */))
4243 && (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)) != 0 /* just in case */)
4244 {
4245 int const rc = vmxHCImportGuestStateInner< a_fWhat
4246 & HMVMX_CPUMCTX_EXTRN_ALL
4247 & ~(a_fDoneLocal | a_fDonePostExit)>(pVCpu, pVmcsInfo, fEFlags);
4248#ifndef IN_NEM_DARWIN
4249 ASMSetFlags(fEFlags);
4250#endif
4251 return rc;
4252 }
4253
4254#ifndef IN_NEM_DARWIN
4255 ASMSetFlags(fEFlags);
4256#endif
4257
4258 /*
4259 * We shouldn't normally get here, but it may happen when executing
4260 * in the debug run-loops. Typically, everything should already have
4261 * been fetched then. Otherwise call the fallback state import function.
4262 */
4263 if (fWhatToDo == 0)
4264 { /* hope the cause was the debug loop or something similar */ }
4265 else
4266 {
4267 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestStateFallback);
4268 Log11Func(("a_fWhat=%#RX64/%#RX64/%#RX64 fExtrn=%#RX64 => %#RX64 - Taking inefficient code path from %s!\n",
4269 a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL, a_fDoneLocal & HMVMX_CPUMCTX_EXTRN_ALL,
4270 a_fDonePostExit & HMVMX_CPUMCTX_EXTRN_ALL, pVCpu->cpum.GstCtx.fExtrn, fWhatToDo, pszCaller));
4271 return vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, a_fWhat | a_fDoneLocal | a_fDonePostExit);
4272 }
4273 }
4274 return VINF_SUCCESS;
4275}
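
/*
 * Usage sketch (the particular extrn mask below is illustrative only; real
 * callers pass whatever CPUMCTX_EXTRN_XXX bits their VM-exit handler needs,
 * relying on the defaulted a_fDoneLocal/a_fDonePostExit template parameters):
 *
 *     int rc = vmxHCImportGuestState<  CPUMCTX_EXTRN_RIP
 *                                    | CPUMCTX_EXTRN_RFLAGS
 *                                    | CPUMCTX_EXTRN_CS>(pVCpu, pVmcsInfo, __FUNCTION__);
 *     AssertRCReturn(rc, rc);
 */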
4276
4277
4278/**
4279 * Check per-VM and per-VCPU force flag actions that require us to go back to
4280 * ring-3 for one reason or another.
4281 *
4282 * @returns Strict VBox status code (i.e. informational status codes too)
4283 * @retval VINF_SUCCESS if we don't have any actions that require going back to
4284 * ring-3.
4285 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
4286 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
4287 * interrupts)
4288 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
4289 * all EMTs to be in ring-3.
4290 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
4291 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
4292 * to the EM loop.
4293 *
4294 * @param pVCpu The cross context virtual CPU structure.
4295 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
4296 * @param fStepping Whether we are single-stepping the guest using the
4297 * hypervisor debugger.
4298 *
4299 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
4300 * is no longer in VMX non-root mode.
4301 */
4302static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
4303{
4304#ifndef IN_NEM_DARWIN
4305 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4306#endif
4307
4308 /*
4309 * Update pending interrupts into the APIC's IRR.
4310 */
4311 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4312 APICUpdatePendingInterrupts(pVCpu);
4313
4314 /*
4315 * Anything pending? Should be more likely than not if we're doing a good job.
4316 */
4317 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4318 if ( !fStepping
4319 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
4320 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
4321 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
4322 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4323 return VINF_SUCCESS;
4324
4325 /* Pending PGM CR3 sync. */
4326 if (VMCPU_FF_IS_ANY_SET(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
4327 {
4328 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4329 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
4330 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
4331 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
4332 if (rcStrict != VINF_SUCCESS)
4333 {
4334 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
4335 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
4336 return rcStrict;
4337 }
4338 }
4339
4340 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
4341 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
4342 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4343 {
4344 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
4345 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
4346 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
4347 rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
4348 return rc;
4349 }
4350
4351 /* Pending VM request packets, such as hardware interrupts. */
4352 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
4353 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
4354 {
4355 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
4356 Log4Func(("Pending VM request forcing us back to ring-3\n"));
4357 return VINF_EM_PENDING_REQUEST;
4358 }
4359
4360 /* Pending PGM pool flushes. */
4361 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
4362 {
4363 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
4364 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
4365 return VINF_PGM_POOL_FLUSH_PENDING;
4366 }
4367
4368 /* Pending DMA requests. */
4369 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4370 {
4371 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
4372 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4373 return VINF_EM_RAW_TO_R3;
4374 }
4375
4376#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4377 /*
4378 * Pending nested-guest events.
4379 *
4380 * Please note that the priority of these events is specified and important.
4381 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4382 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
4383 *
4384 * Interrupt-window and NMI-window VM-exits for the nested-guest need not be
4385 * handled here. They'll be handled by the hardware while executing the nested-guest
4386 * or by us when we inject events that are not part of the VM-entry of the nested-guest.
4387 */
4388 if (fIsNestedGuest)
4389 {
4390 /* Pending nested-guest APIC-write (may or may not cause a VM-exit). */
4391 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4392 {
4393 Log4Func(("Pending nested-guest APIC-write\n"));
4394 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
4395 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4396 if ( rcStrict == VINF_SUCCESS
4397 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4398 return rcStrict;
4399 }
4400
4401 /* Pending nested-guest monitor-trap flag (MTF). */
4402 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
4403 {
4404 Log4Func(("Pending nested-guest MTF\n"));
4405 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
4406 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4407 return rcStrict;
4408 }
4409
4410 /* Pending nested-guest VMX-preemption timer expired. */
4411 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
4412 {
4413 Log4Func(("Pending nested-guest preempt timer\n"));
4414 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
4415 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4416 return rcStrict;
4417 }
4418 }
4419#else
4420 NOREF(fIsNestedGuest);
4421#endif
4422
4423 return VINF_SUCCESS;
4424}
4425
4426
4427/**
4428 * Converts any TRPM trap into a pending HM event. This is typically used when
4429 * entering from ring-3 (not longjmp returns).
4430 *
4431 * @param pVCpu The cross context virtual CPU structure.
4432 */
4433static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
4434{
4435 Assert(TRPMHasTrap(pVCpu));
4436 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4437
4438 uint8_t uVector;
4439 TRPMEVENT enmTrpmEvent;
4440 uint32_t uErrCode;
4441 RTGCUINTPTR GCPtrFaultAddress;
4442 uint8_t cbInstr;
4443 bool fIcebp;
4444
4445 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4446 AssertRC(rc);
4447
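    /* Assemble the VMX interruption-information: the vector in bits 7:0, the event type translated
       from the TRPM event type, and the valid bit. The error code, instruction length and fault
       address are handed to vmxHCSetPendingEvent() separately below. */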
4448 uint32_t u32IntInfo;
4449 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4450 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4451
4452 rc = TRPMResetTrap(pVCpu);
4453 AssertRC(rc);
4454 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4455 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4456
4457 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4458}
4459
4460
4461/**
4462 * Converts the pending HM event into a TRPM trap.
4463 *
4464 * @param pVCpu The cross context virtual CPU structure.
4465 */
4466static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4467{
4468 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4469
4470 /* If a trap was already pending, we did something wrong! */
4471 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4472
4473 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4474 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4475 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4476
4477 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4478
4479 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4480 AssertRC(rc);
4481
4482 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4483 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4484
4485 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4486 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4487 else
4488 {
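        /* For ICEBP (privileged software exception) and software interrupts/exceptions, TRPM also
           needs the originating instruction length (and the ICEBP flag) so the event can be
           re-delivered or the instruction skipped correctly; see the switch below. */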
4489 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4490 switch (uVectorType)
4491 {
4492 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4493 TRPMSetTrapDueToIcebp(pVCpu);
4494 RT_FALL_THRU();
4495 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4496 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4497 {
4498 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4499 || ( uVector == X86_XCPT_BP /* INT3 */
4500 || uVector == X86_XCPT_OF /* INTO */
4501 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4502 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4503 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4504 break;
4505 }
4506 }
4507 }
4508
4509 /* We're now done converting the pending event. */
4510 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4511}
4512
4513
4514/**
4515 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4516 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4517 *
4518 * @param pVCpu The cross context virtual CPU structure.
4519 * @param pVmcsInfo The VMCS info. object.
4520 */
4521static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4522{
4523 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4524 {
4525 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4526 {
4527 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4528 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4529 AssertRC(rc);
4530 }
4531 Log4Func(("Enabled interrupt-window exiting\n"));
4532    } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4533}
4534
4535
4536/**
4537 * Clears the interrupt-window exiting control in the VMCS.
4538 *
4539 * @param pVCpu The cross context virtual CPU structure.
4540 * @param pVmcsInfo The VMCS info. object.
4541 */
4542DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4543{
4544 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4545 {
4546 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4547 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4548 AssertRC(rc);
4549 Log4Func(("Disabled interrupt-window exiting\n"));
4550 }
4551}
4552
4553
4554/**
4555 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4556 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4557 *
4558 * @param pVCpu The cross context virtual CPU structure.
4559 * @param pVmcsInfo The VMCS info. object.
4560 */
4561static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4562{
4563 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4564 {
4565 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4566 {
4567 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4568 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4569 AssertRC(rc);
4570 Log4Func(("Enabled NMI-window exiting\n"));
4571 }
4572 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4573}
4574
4575
4576/**
4577 * Clears the NMI-window exiting control in the VMCS.
4578 *
4579 * @param pVCpu The cross context virtual CPU structure.
4580 * @param pVmcsInfo The VMCS info. object.
4581 */
4582DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4583{
4584 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4585 {
4586 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4587 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4588 AssertRC(rc);
4589 Log4Func(("Disabled NMI-window exiting\n"));
4590 }
4591}
4592
4593
4594/**
4595 * Injects an event into the guest upon VM-entry by updating the relevant fields
4596 * in the VM-entry area in the VMCS.
4597 *
4598 * @returns Strict VBox status code (i.e. informational status codes too).
4599 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4600 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4601 *
4602 * @param pVCpu The cross context virtual CPU structure.
4603 * @param pVmcsInfo The VMCS info object.
4604 * @param   fIsNestedGuest  Flag whether this is for a pending nested-guest event.
4605 * @param pEvent The event being injected.
4606 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4607 *                      will be updated if necessary. This cannot be NULL.
4608 * @param fStepping Whether we're single-stepping guest execution and should
4609 * return VINF_EM_DBG_STEPPED if the event is injected
4610 * directly (registers modified by us, not by hardware on
4611 * VM-entry).
4612 */
4613static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4614 bool fStepping, uint32_t *pfIntrState)
4615{
4616 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4617 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4618 Assert(pfIntrState);
4619
4620#ifdef IN_NEM_DARWIN
4621 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4622#endif
4623
4624 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4625 uint32_t u32IntInfo = pEvent->u64IntInfo;
4626 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4627 uint32_t const cbInstr = pEvent->cbInstr;
4628 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4629 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4630 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4631
4632#ifdef VBOX_STRICT
4633 /*
4634 * Validate the error-code-valid bit for hardware exceptions.
4635 * No error codes for exceptions in real-mode.
4636 *
4637 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4638 */
4639 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4640 && !CPUMIsGuestInRealModeEx(pCtx))
4641 {
4642 switch (uVector)
4643 {
4644 case X86_XCPT_PF:
4645 case X86_XCPT_DF:
4646 case X86_XCPT_TS:
4647 case X86_XCPT_NP:
4648 case X86_XCPT_SS:
4649 case X86_XCPT_GP:
4650 case X86_XCPT_AC:
4651 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4652 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4653 RT_FALL_THRU();
4654 default:
4655 break;
4656 }
4657 }
4658
4659 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4660 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4661 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4662#endif
4663
4664 RT_NOREF(uVector);
4665 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4666 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4667 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4668 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4669 {
4670 Assert(uVector <= X86_XCPT_LAST);
4671 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4672 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4673 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4674 }
4675 else
4676 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4677
4678 /*
4679 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4680 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4681 * interrupt handler in the (real-mode) guest.
4682 *
4683 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4684 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4685 */
4686 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4687 {
4688#ifndef IN_NEM_DARWIN
4689 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4690#endif
4691 {
4692 /*
4693 * For CPUs with unrestricted guest execution enabled and with the guest
4694 * in real-mode, we must not set the deliver-error-code bit.
4695 *
4696 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4697 */
4698 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4699 }
4700#ifndef IN_NEM_DARWIN
4701 else
4702 {
4703 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4704 Assert(PDMVmmDevHeapIsEnabled(pVM));
4705 Assert(pVM->hm.s.vmx.pRealModeTSS);
4706 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4707
4708 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4709 int rc2 = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4710 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4711 AssertRCReturn(rc2, rc2);
4712
4713 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4714 size_t const cbIdtEntry = sizeof(X86IDTR16);
4715 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4716 {
4717 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4718 if (uVector == X86_XCPT_DF)
4719 return VINF_EM_RESET;
4720
4721 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4722 No error codes for exceptions in real-mode. */
4723 if (uVector == X86_XCPT_GP)
4724 {
4725 static HMEVENT const s_EventXcptDf
4726 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4727 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4728 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4729 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4730 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
4731 }
4732
4733 /*
4734 * If we're injecting an event with no valid IDT entry, inject a #GP.
4735 * No error codes for exceptions in real-mode.
4736 *
4737 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4738 */
4739 static HMEVENT const s_EventXcptGp
4740 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4741 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4742 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4743 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4744 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
4745 }
4746
4747 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4748 uint16_t uGuestIp = pCtx->ip;
4749 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4750 {
4751 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4752 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
4753 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4754 }
4755 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4756 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4757
4758 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4759 X86IDTR16 IdtEntry;
4760 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4761 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4762 AssertRCReturn(rc2, rc2);
4763
4764 /* Construct the stack frame for the interrupt/exception handler. */
4765 VBOXSTRICTRC rcStrict;
4766 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, (uint16_t)pCtx->eflags.u);
4767 if (rcStrict == VINF_SUCCESS)
4768 {
4769 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4770 if (rcStrict == VINF_SUCCESS)
4771 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4772 }
4773
4774 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4775 if (rcStrict == VINF_SUCCESS)
4776 {
4777 pCtx->eflags.u &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4778 pCtx->rip = IdtEntry.offSel;
4779 pCtx->cs.Sel = IdtEntry.uSel;
4780 pCtx->cs.ValidSel = IdtEntry.uSel;
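                /* Real-mode code segment base = selector << 4; cbIdtEntry (sizeof(X86IDTR16)) happens
                   to be 4, which is why it doubles as the shift count here. */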
4781 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4782 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4783 && uVector == X86_XCPT_PF)
4784 pCtx->cr2 = GCPtrFault;
4785
4786 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4787 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4788 | HM_CHANGED_GUEST_RSP);
4789
4790 /*
4791 * If we delivered a hardware exception (other than an NMI) and if there was
4792 * block-by-STI in effect, we should clear it.
4793 */
4794 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4795 {
4796 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4797 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4798 Log4Func(("Clearing inhibition due to STI\n"));
4799 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4800 }
4801
4802 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4803 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4804
4805 /*
4806 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4807 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4808 */
4809 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4810
4811 /*
4812 * If we eventually support nested-guest execution without unrestricted guest execution,
4813 * we should set fInterceptEvents here.
4814 */
4815 Assert(!fIsNestedGuest);
4816
4817 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4818 if (fStepping)
4819 rcStrict = VINF_EM_DBG_STEPPED;
4820 }
4821 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4822 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4823 return rcStrict;
4824 }
4825#else
4826 RT_NOREF(pVmcsInfo);
4827#endif
4828 }
4829
4830 /*
4831 * Validate.
4832 */
4833 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4834 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4835
4836 /*
4837 * Inject the event into the VMCS.
4838 */
4839 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4840 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4841 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4842 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4843 AssertRC(rc);
4844
4845 /*
4846 * Update guest CR2 if this is a page-fault.
4847 */
4848 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4849 pCtx->cr2 = GCPtrFault;
4850
4851 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4852 return VINF_SUCCESS;
4853}
4854
4855
4856/**
4857 * Evaluates the event to be delivered to the guest and sets it as the pending
4858 * event.
4859 *
4860 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4861 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4862 * NOT restore these force-flags.
4863 *
4864 * @returns Strict VBox status code (i.e. informational status codes too).
4865 * @param pVCpu The cross context virtual CPU structure.
4866 * @param pVmcsInfo The VMCS information structure.
4867 * @param pfIntrState Where to store the updated VMX guest-interruptibility
4868 * state.
4869 */
4870static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t *pfIntrState)
4871{
4872 Assert(pfIntrState);
4873 Assert(!TRPMHasTrap(pVCpu));
4874
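    /* Fetch the up-to-date guest-interruptibility state first; the NMI/interrupt-window handling
       and injection decisions below all depend on it. */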
4875 *pfIntrState = vmxHCGetGuestIntrStateWithUpdate(pVCpu);
4876
4877 /*
4878 * Evaluate if a new event needs to be injected.
4879 * An event that's already pending has already performed all necessary checks.
4880 */
4881 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4882 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
4883 {
4884 /** @todo SMI. SMIs take priority over NMIs. */
4885
4886 /*
4887 * NMIs.
4888 * NMIs take priority over external interrupts.
4889 */
4890 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4891 {
4892 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
4893 {
4894 /* Finally, inject the NMI and we're done. */
4895 vmxHCSetPendingXcptNmi(pVCpu);
4896 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4897 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4898 return VINF_SUCCESS;
4899 }
4900 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4901 }
4902 else
4903 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4904
4905 /*
4906 * External interrupts (PIC/APIC).
4907 */
4908 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4909 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4910 {
4911 Assert(!DBGFIsStepping(pVCpu));
4912 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4913 AssertRC(rc);
4914
4915 if (pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF)
4916 {
4917 /*
4918 * Once PDMGetInterrupt() returns an interrupt we -must- deliver it.
4919 * We cannot re-request the interrupt from the controller again.
4920 */
4921 uint8_t u8Interrupt;
4922 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4923 if (RT_SUCCESS(rc))
4924 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4925 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4926 {
4927 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
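                    /* The interrupt is masked by the TPR. With a TPR shadow we program the TPR
                       threshold to the interrupt's priority class (vector bits 7:4) so that we get a
                       TPR-below-threshold VM-exit once the guest lowers its TPR enough to accept it. */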
4928 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
4929 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
4930 /*
4931 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4932 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4933 * need to re-set this force-flag here.
4934 */
4935 }
4936 else
4937 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4938
4939 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
4940 return VINF_SUCCESS;
4941 }
4942 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4943 }
4944 else
4945 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
4946 }
4947 else
4948 {
4949 /*
4950 * An event is being injected or we are in an interrupt shadow.
4951 * If another event is pending currently, instruct VT-x to cause a VM-exit as
4952 * soon as the guest is ready to accept it.
4953 */
4954 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4955 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4956 else
4957 {
4958 Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT));
4959 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4960 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4961 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4962 else
4963 {
4964 /* It's possible that interrupt-window exiting is still active, clear it as it's now unnecessary. */
4965 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
4966 }
4967 }
4968 }
4969
4970 return VINF_SUCCESS;
4971}
4972
4973
4974#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4975/**
4976 * Evaluates the event to be delivered to the nested-guest and sets it as the
4977 * pending event.
4978 *
4979 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4980 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4981 * NOT restore these force-flags.
4982 *
4983 * @returns Strict VBox status code (i.e. informational status codes too).
4984 * @param pVCpu The cross context virtual CPU structure.
4985 * @param pVmcsInfo The VMCS information structure.
4986 * @param pfIntrState Where to store the updated VMX guest-interruptibility
4987 * state.
4988 *
4989 * @remarks The guest must be in VMX non-root mode.
4990 */
4991static VBOXSTRICTRC vmxHCEvaluatePendingEventNested(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t *pfIntrState)
4992{
4993 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4994
4995 Assert(pfIntrState);
4996 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
4997 Assert(!TRPMHasTrap(pVCpu));
4998
4999 *pfIntrState = vmxHCGetGuestIntrStateWithUpdate(pVCpu);
5000
5001 /*
5002 * If we are injecting an event, all necessary checks have been performed.
5003     * Any interrupt-window or NMI-window exiting would have been set up by the
5004 * nested-guest while we merged controls.
5005 */
5006 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5007 return VINF_SUCCESS;
5008
5009 /*
5010 * An event injected by VMLAUNCH/VMRESUME instruction emulation should've been
5011 * made pending (TRPM to HM event) and would be handled above if we resumed
5012 * execution in HM. If somehow we fell back to emulation after the
5013 * VMLAUNCH/VMRESUME instruction, it would have been handled in iemRaiseXcptOrInt
5014 * (calling iemVmxVmexitEvent). Thus, if we get here the nested-hypervisor's VMX
5015 * intercepts should be active and any events pending here have been generated
5016 * while executing the guest in VMX non-root mode after virtual VM-entry completed.
5017 */
5018 Assert(CPUMIsGuestVmxInterceptEvents(pCtx));
5019
5020 /*
5021 * Interrupt shadows MAY block NMIs.
5022     * They also block external interrupts and MAY block external-interrupt VM-exits.
5023 *
5024 * See Intel spec. 24.4.2 "Guest Non-Register State".
5025 * See Intel spec. 25.4.1 "Event Blocking".
5026 */
5027 if (!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
5028 { /* likely */ }
5029 else
5030 return VINF_SUCCESS;
5031
5032 /** @todo SMI. SMIs take priority over NMIs. */
5033
5034 /*
5035 * NMIs.
5036 * NMIs take priority over interrupts.
5037 */
5038 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
5039 {
5040 /*
5041 * Nested-guest NMI-window exiting.
5042 * The NMI-window exit must happen regardless of whether an NMI is pending
5043 * provided virtual-NMI blocking is not in effect.
5044 *
5045 * See Intel spec. 25.2 "Other Causes Of VM Exits".
5046 */
5047 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
5048 && !CPUMIsGuestVmxVirtNmiBlocking(pCtx))
5049 {
5050 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
5051 return IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
5052 }
5053
5054 /*
5055 * For a nested-guest, the FF always indicates the outer guest's ability to
5056 * receive an NMI while the guest-interruptibility state bit depends on whether
5057 * the nested-hypervisor is using virtual-NMIs.
5058 *
5059 * It is very important that we also clear the force-flag if we are causing
5060 * an NMI VM-exit as it is the responsibility of the nested-hypervisor to deal
5061 * with re-injecting or discarding the NMI. This fixes the bug that showed up
5062 * with SMP Windows Server 2008 R2 with Hyper-V enabled, see @bugref{10318#c19}.
5063 */
5064 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI))
5065 {
5066 if (CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
5067 return IEMExecVmxVmexitXcptNmi(pVCpu);
5068 vmxHCSetPendingXcptNmi(pVCpu);
5069 return VINF_SUCCESS;
5070 }
5071 }
5072
5073 /*
5074 * Nested-guest interrupt-window exiting.
5075 *
5076 * We must cause the interrupt-window exit regardless of whether an interrupt is pending
5077 * provided virtual interrupts are enabled.
5078 *
5079 * See Intel spec. 25.2 "Other Causes Of VM Exits".
5080 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5081 */
5082 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
5083 && CPUMIsGuestVmxVirtIntrEnabled(pCtx))
5084 {
5085 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
5086 return IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
5087 }
5088
5089 /*
5090 * External interrupts (PIC/APIC).
5091 *
5092 * When "External interrupt exiting" is set the VM-exit happens regardless of RFLAGS.IF.
5093 * When it isn't set, RFLAGS.IF controls delivery of the interrupt as always.
5094 * This fixes a nasty SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued
5095 * by other VM-exits (like a preemption timer), see @bugref{9562#c18}.
5096 *
5097 * NMIs block external interrupts as they are dispatched through the interrupt gate (vector 2)
5098 * which automatically clears EFLAGS.IF. Also it's possible an NMI handler could enable interrupts
5099 * and thus we should not check for NMI inhibition here.
5100 *
5101 * See Intel spec. 25.4.1 "Event Blocking".
5102 * See Intel spec. 6.8.1 "Masking Maskable Hardware Interrupts".
5103 */
5104 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
5105 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5106 {
5107 Assert(!DBGFIsStepping(pVCpu));
5108 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
5109 AssertRC(rc);
5110 if (CPUMIsGuestVmxPhysIntrEnabled(pCtx))
5111 {
5112 /* Nested-guest external interrupt VM-exit. */
5113 if ( CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
5114 && !CPUMIsGuestVmxExitCtlsSet(pCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
5115 {
5116 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
5117 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5118 return rcStrict;
5119 }
5120
5121 /*
5122 * Fetch the external interrupt from the interrupt controller.
5123 * Once PDMGetInterrupt() returns an interrupt we -must- deliver it or pass it to
5124 * the nested-hypervisor. We cannot re-request the interrupt from the controller again.
5125 */
5126 uint8_t u8Interrupt;
5127 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
5128 if (RT_SUCCESS(rc))
5129 {
5130 /* Nested-guest external interrupt VM-exit when the "acknowledge interrupt on exit" is enabled. */
5131 if (CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
5132 {
5133 Assert(CPUMIsGuestVmxExitCtlsSet(pCtx, VMX_EXIT_CTLS_ACK_EXT_INT));
5134 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
5135 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5136 return rcStrict;
5137 }
5138 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
5139 return VINF_SUCCESS;
5140 }
5141 }
5142 }
5143 return VINF_SUCCESS;
5144}
5145#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5146
5147
5148/**
5149 * Injects any pending events into the guest if the guest is in a state to
5150 * receive them.
5151 *
5152 * @returns Strict VBox status code (i.e. informational status codes too).
5153 * @param pVCpu The cross context virtual CPU structure.
5154 * @param pVmcsInfo The VMCS information structure.
5155 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
5156 * @param fIntrState The VT-x guest-interruptibility state.
5157 * @param fStepping Whether we are single-stepping the guest using the
5158 * hypervisor debugger and should return
5159 * VINF_EM_DBG_STEPPED if the event was dispatched
5160 * directly.
5161 */
5162static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
5163 uint32_t fIntrState, bool fStepping)
5164{
5165 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
5166#ifndef IN_NEM_DARWIN
5167 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5168#endif
5169
5170#ifdef VBOX_STRICT
5171 /*
5172 * Verify guest-interruptibility state.
5173 *
5174 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
5175 * since injecting an event may modify the interruptibility state and we must thus always
5176 * use fIntrState.
5177 */
5178 {
5179 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
5180 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
5181 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
5182 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5183 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
5184 Assert(!TRPMHasTrap(pVCpu));
5185 NOREF(fBlockMovSS); NOREF(fBlockSti);
5186 }
5187#endif
5188
5189 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5190 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5191 {
5192 /*
5193 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
5194 * pending even while injecting an event and in this case, we want a VM-exit as soon as
5195 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
5196 *
5197 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5198 */
5199 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
5200#ifdef VBOX_STRICT
5201 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5202 {
5203 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
5204 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5205 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5206 }
5207 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5208 {
5209 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
5210 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5211 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5212 }
5213#endif
5214 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5215 uIntType));
5216
5217 /*
5218 * Inject the event and get any changes to the guest-interruptibility state.
5219 *
5220 * The guest-interruptibility state may need to be updated if we inject the event
5221 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
5222 */
5223 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
5224 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
5225
5226 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5227 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
5228 else
5229 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
5230 }
5231
5232 /*
5233 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
5234     * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
5235 */
5236 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5237 && !fIsNestedGuest)
5238 {
5239 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
5240
5241 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5242 {
5243 /*
5244 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
5245 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
5246 */
5247 Assert(!DBGFIsStepping(pVCpu));
5248 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_TF);
5249 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
5250 fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
5251 AssertRC(rc);
5252 }
5253 else
5254 {
5255 /*
5256             * We must not deliver a debug exception when single-stepping over STI/MOV-SS in the
5257             * hypervisor debugger using EFLAGS.TF, but rather clear the interrupt inhibition. However,
5258             * we take care of this case in vmxHCExportSharedDebugState (and also of the case where
5259             * we use MTF), so just make sure it's called before executing guest code.
5260 */
5261 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
5262 }
5263 }
5264    /* else: for nested-guests this is currently handled while merging controls. */
5265
5266 /*
5267 * Finally, update the guest-interruptibility state.
5268 *
5269 * This is required for the real-on-v86 software interrupt injection, for
5270 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
5271 */
5272 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
5273 AssertRC(rc);
5274
5275 /*
5276 * There's no need to clear the VM-entry interruption-information field here if we're not
5277 * injecting anything. VT-x clears the valid bit on every VM-exit.
5278 *
5279 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
5280 */
5281
5282 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
5283 return rcStrict;
5284}
5285
5286
5287/**
5288 * Tries to determine what part of the guest-state VT-x has deemed invalid
5289 * and updates the error record fields accordingly.
5290 *
5291 * @returns VMX_IGS_* error codes.
5292 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
5293 * wrong with the guest state.
5294 *
5295 * @param pVCpu The cross context virtual CPU structure.
5296 * @param pVmcsInfo The VMCS info. object.
5297 *
5298 * @remarks This function assumes our cache of the VMCS controls
5299 *          is valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
5300 */
5301static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
5302{
5303#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
5304#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { uError = (err); break; } else do { } while (0)
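/* These helpers record the first inconsistency found in uError and break out of the do-while(0)
   scan below, so the function reports the first guest-state check that failed. */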
5305
5306 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5307 uint32_t uError = VMX_IGS_ERROR;
5308 uint32_t u32IntrState = 0;
5309#ifndef IN_NEM_DARWIN
5310 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5311 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
5312#else
5313 bool const fUnrestrictedGuest = true;
5314#endif
5315 do
5316 {
5317 int rc;
5318
5319 /*
5320 * Guest-interruptibility state.
5321 *
5322         * Read this first so that any check failing before the ones that actually
5323         * require the guest-interruptibility state still reports against the correct
5324         * VMCS value, avoiding further confusion.
5325 */
5326 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
5327 AssertRC(rc);
5328
5329 uint32_t u32Val;
5330 uint64_t u64Val;
5331
5332 /*
5333 * CR0.
5334 */
5335 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5336 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
5337 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
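        /* fSetCr0 = CR0 bits the CPU requires to be 1; fZapCr0 = mask of bits allowed to be 1
           (anything outside it must be 0). The CR4 pair further down follows the same scheme. */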
5338 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
5339 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
5340 if (fUnrestrictedGuest)
5341 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
5342
5343 uint64_t u64GuestCr0;
5344 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
5345 AssertRC(rc);
5346 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
5347 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
5348 if ( !fUnrestrictedGuest
5349 && (u64GuestCr0 & X86_CR0_PG)
5350 && !(u64GuestCr0 & X86_CR0_PE))
5351 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
5352
5353 /*
5354 * CR4.
5355 */
5356 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5357 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
5358 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
5359
5360 uint64_t u64GuestCr4;
5361 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
5362 AssertRC(rc);
5363 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
5364 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
5365
5366 /*
5367 * IA32_DEBUGCTL MSR.
5368 */
5369 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
5370 AssertRC(rc);
5371 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5372 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
5373 {
5374 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
5375 }
5376 uint64_t u64DebugCtlMsr = u64Val;
5377
5378#ifdef VBOX_STRICT
5379 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
5380 AssertRC(rc);
5381 Assert(u32Val == pVmcsInfo->u32EntryCtls);
5382#endif
5383 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5384
5385 /*
5386 * RIP and RFLAGS.
5387 */
5388 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
5389 AssertRC(rc);
5390 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
5391 if ( !fLongModeGuest
5392 || !pCtx->cs.Attr.n.u1Long)
5393 {
5394 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
5395 }
5396 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
5397 * must be identical if the "IA-32e mode guest" VM-entry
5398 * control is 1 and CS.L is 1. No check applies if the
5399 * CPU supports 64 linear-address bits. */
5400
5401 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
5402 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
5403 AssertRC(rc);
5404 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
5405 VMX_IGS_RFLAGS_RESERVED);
5406 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
5407 uint32_t const u32Eflags = u64Val;
5408
5409 if ( fLongModeGuest
5410 || ( fUnrestrictedGuest
5411 && !(u64GuestCr0 & X86_CR0_PE)))
5412 {
5413 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
5414 }
5415
5416 uint32_t u32EntryInfo;
5417 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
5418 AssertRC(rc);
5419 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5420 {
5421 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
5422 }
5423
5424 /*
5425 * 64-bit checks.
5426 */
5427 if (fLongModeGuest)
5428 {
5429 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
5430 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
5431 }
5432
5433 if ( !fLongModeGuest
5434 && (u64GuestCr4 & X86_CR4_PCIDE))
5435 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
5436
5437 /** @todo CR3 field must be such that bits 63:52 and bits in the range
5438 * 51:32 beyond the processor's physical-address width are 0. */
5439
5440 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5441 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
5442 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
5443
5444#ifndef IN_NEM_DARWIN
5445 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
5446 AssertRC(rc);
5447 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
5448
5449 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
5450 AssertRC(rc);
5451 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
5452#endif
5453
5454 /*
5455 * PERF_GLOBAL MSR.
5456 */
5457 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
5458 {
5459 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
5460 AssertRC(rc);
5461 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
5462 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
5463 }
5464
5465 /*
5466 * PAT MSR.
5467 */
5468 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5469 {
5470 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
5471 AssertRC(rc);
5472 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
5473 for (unsigned i = 0; i < 8; i++)
5474 {
5475 uint8_t u8Val = (u64Val & 0xff);
5476 if ( u8Val > MSR_IA32_PAT_MT_UCD
5477 || u8Val == MSR_IA32_PAT_MT_RSVD_2
5478 || u8Val == MSR_IA32_PAT_MT_RSVD_3)
5479 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
5480 u64Val >>= 8;
5481 }
5482 }
5483
5484 /*
5485 * EFER MSR.
5486 */
5487 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5488 {
5489 Assert(g_fHmVmxSupportsVmcsEfer);
5490 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
5491 AssertRC(rc);
5492 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
5493 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
5494 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
5495 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
5496 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
5497 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
5498 * iemVmxVmentryCheckGuestState(). */
5499 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5500 || !(u64GuestCr0 & X86_CR0_PG)
5501 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
5502 VMX_IGS_EFER_LMA_LME_MISMATCH);
5503 }
5504
5505 /*
5506 * Segment registers.
5507 */
5508 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5509 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
5510 if (!(u32Eflags & X86_EFL_VM))
5511 {
5512 /* CS */
5513 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
5514 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
5515 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
5516 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5517 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5518 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
5519 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5520 /* CS cannot be loaded with NULL in protected mode. */
5521 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
5522 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
5523 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5524 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
5525 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5526 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
5527 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
5528 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
5529 else
5530 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
5531
5532 /* SS */
5533 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5534 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
5535 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
5536 if ( !(pCtx->cr0 & X86_CR0_PE)
5537 || pCtx->cs.Attr.n.u4Type == 3)
5538 {
5539 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
5540 }
5541
5542 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5543 {
5544 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
5545 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
5546 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
5547 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
5548 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5549 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5550 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
5551 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5552 }
5553
5554 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
5555 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5556 {
5557 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
5558 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
5559 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5560 || pCtx->ds.Attr.n.u4Type > 11
5561 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5562 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
5563 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
5564 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5565 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5566 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
5567 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5568 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5569 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
5570 }
5571 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5572 {
5573 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
5574 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
5575 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5576 || pCtx->es.Attr.n.u4Type > 11
5577 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5578 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5579 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5580 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5581 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5582 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5583 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5584 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5585 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5586 }
5587 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5588 {
5589 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5590 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5591 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5592 || pCtx->fs.Attr.n.u4Type > 11
5593 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5594 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5595 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5596 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5597 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5598 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5599 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5600 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5601 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5602 }
5603 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5604 {
5605 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5606 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5607 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5608 || pCtx->gs.Attr.n.u4Type > 11
5609 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5610 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5611 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5612 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5613 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5614 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5615 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5616 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5617 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5618 }
5619 /* 64-bit capable CPUs. */
5620 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5621 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5622 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5623 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5624 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5625 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5626 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5627 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5628 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5629 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5630 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5631 }
5632 else
5633 {
5634 /* V86 mode checks. */
5635 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
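            /* With the real-on-v86 workaround active the segment attributes we load are forced to
               0xf3 (present, DPL=3, accessed read/write data), so validate against that; otherwise
               use the guest's own cached attributes. */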
5636 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5637 {
5638 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5639 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5640 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5641 }
5642 else
5643 {
5644 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5645 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5646 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5647 }
5648
5649 /* CS */
5650 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5651 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5652 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5653 /* SS */
5654 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5655 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5656 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5657 /* DS */
5658 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5659 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5660 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5661 /* ES */
5662 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5663 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5664 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5665 /* FS */
5666 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5667 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5668 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5669 /* GS */
5670 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5671 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5672 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5673 /* 64-bit capable CPUs. */
5674 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5675 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5676 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5677 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5678 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5679 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5680 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5681 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5682 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5683 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5684 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5685 }
5686
5687 /*
5688 * TR.
5689 */
5690 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5691 /* 64-bit capable CPUs. */
5692 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5693 if (fLongModeGuest)
5694 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5695 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5696 else
5697 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5698 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5699 VMX_IGS_TR_ATTR_TYPE_INVALID);
5700 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5701 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5702 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5703 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5704 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5705 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5706 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5707 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5708
5709 /*
5710 * GDTR and IDTR (64-bit capable checks).
5711 */
5712 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5713 AssertRC(rc);
5714 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5715
5716 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5717 AssertRC(rc);
5718 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5719
5720 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5721 AssertRC(rc);
5722 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5723
5724 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5725 AssertRC(rc);
5726 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5727
5728 /*
5729 * Guest Non-Register State.
5730 */
5731 /* Activity State. */
5732 uint32_t u32ActivityState;
5733 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5734 AssertRC(rc);
5735 HMVMX_CHECK_BREAK( !u32ActivityState
5736 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5737 VMX_IGS_ACTIVITY_STATE_INVALID);
5738 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5739 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5740
5741 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5742 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5743 {
5744 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5745 }
5746
5747 /** @todo Activity state and injecting interrupts. Left as a todo since we
5748 * currently don't use any activity state other than ACTIVE. */
5749
5750 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5751 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5752
5753 /* Guest interruptibility-state. */
5754 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5755 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5756 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5757 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5758 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5759 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5760 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5761 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5762 {
5763 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5764 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5765 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5766 }
5767 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5768 {
5769 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5770 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5771 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5772 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5773 }
5774 /** @todo Assumes the processor is not in SMM. */
5775 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5776 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5777 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5778 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5779 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5780 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5781 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5782 {
5783 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5784 }
5785
5786 /* Pending debug exceptions. */
5787 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5788 AssertRC(rc);
5789 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5790 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5791 u32Val = u64Val; /* For pending debug exceptions checks below. */
5792
5793 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5794 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5795 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5796 {
5797 if ( (u32Eflags & X86_EFL_TF)
5798 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5799 {
5800 /* Bit 14 is PendingDebug.BS. */
5801 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5802 }
5803 if ( !(u32Eflags & X86_EFL_TF)
5804 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5805 {
5806 /* Bit 14 is PendingDebug.BS. */
5807 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5808 }
5809 }
5810
5811#ifndef IN_NEM_DARWIN
5812 /* VMCS link pointer. */
5813 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5814 AssertRC(rc);
5815 if (u64Val != UINT64_C(0xffffffffffffffff))
5816 {
5817 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5818 /** @todo Bits beyond the processor's physical-address width MBZ. */
5819 /** @todo SMM checks. */
5820 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5821 Assert(pVmcsInfo->pvShadowVmcs);
5822 VMXVMCSREVID VmcsRevId;
5823 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5824 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5825 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5826 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5827 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5828 }
5829
5830 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5831 * not using nested paging? */
5832 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5833 && !fLongModeGuest
5834 && CPUMIsGuestInPAEModeEx(pCtx))
5835 {
5836 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5837 AssertRC(rc);
5838 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5839
5840 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5841 AssertRC(rc);
5842 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5843
5844 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5845 AssertRC(rc);
5846 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5847
5848 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5849 AssertRC(rc);
5850 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5851 }
5852#endif
5853
5854 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5855 if (uError == VMX_IGS_ERROR)
5856 uError = VMX_IGS_REASON_NOT_FOUND;
5857 } while (0);
5858
5859 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5860 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5861 return uError;
5862
5863#undef HMVMX_ERROR_BREAK
5864#undef HMVMX_CHECK_BREAK
5865}
5866
5867
5868#ifndef HMVMX_USE_FUNCTION_TABLE
5869/**
5870 * Handles a guest VM-exit from hardware-assisted VMX execution.
5871 *
5872 * @returns Strict VBox status code (i.e. informational status codes too).
5873 * @param pVCpu The cross context virtual CPU structure.
5874 * @param pVmxTransient The VMX-transient structure.
5875 */
5876DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5877{
5878#ifdef DEBUG_ramshankar
5879# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5880 do { \
5881 if (a_fSave != 0) \
5882 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5883 VBOXSTRICTRC rcStrict = a_CallExpr; \
5884 if (a_fSave != 0) \
5885 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5886 return rcStrict; \
5887 } while (0)
5888#else
5889# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5890#endif
5891 uint32_t const uExitReason = pVmxTransient->uExitReason;
5892 switch (uExitReason)
5893 {
5894 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5895 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5896 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5897 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5898 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5899 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5900 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5901 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5902 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5903 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5904 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5905 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5906 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5907 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5908 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5909 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5910 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5911 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5912 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5913 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5914 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5915 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5916 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5917 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5918 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5919 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5920 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5921 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5922 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5923 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5924#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5925 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5926 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5927 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5928 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5929 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5930 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5931 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5932 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5933 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5934 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5935#else
5936 case VMX_EXIT_VMCLEAR:
5937 case VMX_EXIT_VMLAUNCH:
5938 case VMX_EXIT_VMPTRLD:
5939 case VMX_EXIT_VMPTRST:
5940 case VMX_EXIT_VMREAD:
5941 case VMX_EXIT_VMRESUME:
5942 case VMX_EXIT_VMWRITE:
5943 case VMX_EXIT_VMXOFF:
5944 case VMX_EXIT_VMXON:
5945 case VMX_EXIT_INVVPID:
5946 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5947#endif
5948#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5949 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5950#else
5951 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5952#endif
5953
5954 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5955 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5956 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5957
5958 case VMX_EXIT_INIT_SIGNAL:
5959 case VMX_EXIT_SIPI:
5960 case VMX_EXIT_IO_SMI:
5961 case VMX_EXIT_SMI:
5962 case VMX_EXIT_ERR_MSR_LOAD:
5963 case VMX_EXIT_ERR_MACHINE_CHECK:
5964 case VMX_EXIT_PML_FULL:
5965 case VMX_EXIT_VIRTUALIZED_EOI:
5966 case VMX_EXIT_GDTR_IDTR_ACCESS:
5967 case VMX_EXIT_LDTR_TR_ACCESS:
5968 case VMX_EXIT_APIC_WRITE:
5969 case VMX_EXIT_RDRAND:
5970 case VMX_EXIT_RSM:
5971 case VMX_EXIT_VMFUNC:
5972 case VMX_EXIT_ENCLS:
5973 case VMX_EXIT_RDSEED:
5974 case VMX_EXIT_XSAVES:
5975 case VMX_EXIT_XRSTORS:
5976 case VMX_EXIT_UMWAIT:
5977 case VMX_EXIT_TPAUSE:
5978 case VMX_EXIT_LOADIWKEY:
5979 default:
5980 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5981 }
5982#undef VMEXIT_CALL_RET
5983}
5984#endif /* !HMVMX_USE_FUNCTION_TABLE */
5985
5986
5987#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5988/**
5989 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5990 *
5991 * @returns Strict VBox status code (i.e. informational status codes too).
5992 * @param pVCpu The cross context virtual CPU structure.
5993 * @param pVmxTransient The VMX-transient structure.
5994 */
5995DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5996{
5997#ifdef DEBUG_ramshankar
5998# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5999 do { \
6000 if (a_fSave != 0) \
6001 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
6002 VBOXSTRICTRC rcStrict = a_CallExpr; \
6003 return rcStrict; \
6004 } while (0)
6005#else
6006# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
6007#endif
6008
6009 uint32_t const uExitReason = pVmxTransient->uExitReason;
6010 switch (uExitReason)
6011 {
6012# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6013 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient));
6014 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolationNested(pVCpu, pVmxTransient));
6015# else
6016 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
6017 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
6018# endif
6019 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient));
6020 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstrNested(pVCpu, pVmxTransient));
6021 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHltNested(pVCpu, pVmxTransient));
6022
6023 /*
6024 * We shouldn't direct host physical interrupts to the nested-guest.
6025 */
6026 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
6027
6028 /*
6029 * Instructions that cause VM-exits unconditionally or whose exit condition is
6030 * always taken solely from the nested hypervisor (meaning if the VM-exit
6031 * happens, it's guaranteed to be a nested-guest VM-exit).
6032 *
6033 * - Provides VM-exit instruction length ONLY.
6034 */
6035 case VMX_EXIT_CPUID: /* Unconditional. */
6036 case VMX_EXIT_VMCALL:
6037 case VMX_EXIT_GETSEC:
6038 case VMX_EXIT_INVD:
6039 case VMX_EXIT_XSETBV:
6040 case VMX_EXIT_VMLAUNCH:
6041 case VMX_EXIT_VMRESUME:
6042 case VMX_EXIT_VMXOFF:
6043 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
6044 case VMX_EXIT_VMFUNC:
6045 VMEXIT_CALL_RET(0, vmxHCExitInstrNested(pVCpu, pVmxTransient));
6046
6047 /*
6048 * Instructions that cause VM-exits unconditionally or whose exit condition is
6049 * always taken solely from the nested hypervisor (meaning if the VM-exit
6050 * happens, it's guaranteed to be a nested-guest VM-exit).
6051 *
6052 * - Provides VM-exit instruction length.
6053 * - Provides VM-exit information.
6054 * - Optionally provides Exit qualification.
6055 *
6056 * Since Exit qualification is 0 for all VM-exits where it is not
6057 * applicable, reading and passing it to the guest should produce
6058 * defined behavior.
6059 *
6060 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
6061 */
6062 case VMX_EXIT_INVEPT: /* Unconditional. */
6063 case VMX_EXIT_INVVPID:
6064 case VMX_EXIT_VMCLEAR:
6065 case VMX_EXIT_VMPTRLD:
6066 case VMX_EXIT_VMPTRST:
6067 case VMX_EXIT_VMXON:
6068 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
6069 case VMX_EXIT_LDTR_TR_ACCESS:
6070 case VMX_EXIT_RDRAND:
6071 case VMX_EXIT_RDSEED:
6072 case VMX_EXIT_XSAVES:
6073 case VMX_EXIT_XRSTORS:
6074 case VMX_EXIT_UMWAIT:
6075 case VMX_EXIT_TPAUSE:
6076 VMEXIT_CALL_RET(0, vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient));
6077
6078 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtscNested(pVCpu, pVmxTransient));
6079 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscpNested(pVCpu, pVmxTransient));
6080 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsrNested(pVCpu, pVmxTransient));
6081 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsrNested(pVCpu, pVmxTransient));
6082 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpgNested(pVCpu, pVmxTransient));
6083 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcidNested(pVCpu, pVmxTransient));
6084 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient));
6085 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvdNested(pVCpu, pVmxTransient));
6086 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtfNested(pVCpu, pVmxTransient));
6087 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccessNested(pVCpu, pVmxTransient));
6088 case VMX_EXIT_APIC_WRITE: VMEXIT_CALL_RET(0, vmxHCExitApicWriteNested(pVCpu, pVmxTransient));
6089 case VMX_EXIT_VIRTUALIZED_EOI: VMEXIT_CALL_RET(0, vmxHCExitVirtEoiNested(pVCpu, pVmxTransient));
6090 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRxNested(pVCpu, pVmxTransient));
6091 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindowNested(pVCpu, pVmxTransient));
6092 case VMX_EXIT_NMI_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitNmiWindowNested(pVCpu, pVmxTransient));
6093 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient));
6094 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwaitNested(pVCpu, pVmxTransient));
6095 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitorNested(pVCpu, pVmxTransient));
6096 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPauseNested(pVCpu, pVmxTransient));
6097
6098 case VMX_EXIT_PREEMPT_TIMER:
6099 {
6100 /** @todo NSTVMX: Preempt timer. */
6101 VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
6102 }
6103
6104 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRxNested(pVCpu, pVmxTransient));
6105 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmcNested(pVCpu, pVmxTransient));
6106
6107 case VMX_EXIT_VMREAD:
6108 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient));
6109
6110 case VMX_EXIT_TRIPLE_FAULT: VMEXIT_CALL_RET(0, vmxHCExitTripleFaultNested(pVCpu, pVmxTransient));
6111 case VMX_EXIT_ERR_INVALID_GUEST_STATE: VMEXIT_CALL_RET(0, vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient));
6112
6113 case VMX_EXIT_INIT_SIGNAL:
6114 case VMX_EXIT_SIPI:
6115 case VMX_EXIT_IO_SMI:
6116 case VMX_EXIT_SMI:
6117 case VMX_EXIT_ERR_MSR_LOAD:
6118 case VMX_EXIT_ERR_MACHINE_CHECK:
6119 case VMX_EXIT_PML_FULL:
6120 case VMX_EXIT_RSM:
6121 default:
6122 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
6123 }
6124#undef VMEXIT_CALL_RET
6125}
6126#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6127
6128
6129/** @name VM-exit helpers.
6130 * @{
6131 */
6132/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6133/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6134/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6135
6136/** Macro for VM-exits called unexpectedly. */
6137#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
6138 do { \
6139 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
6140 return VERR_VMX_UNEXPECTED_EXIT; \
6141 } while (0)
6142
6143#ifdef VBOX_STRICT
6144# ifndef IN_NEM_DARWIN
6145/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
6146# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
6147 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6148
6149# define HMVMX_ASSERT_PREEMPT_CPUID() \
6150 do { \
6151 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6152 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6153 } while (0)
6154
6155# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6156 do { \
6157 AssertPtr((a_pVCpu)); \
6158 AssertPtr((a_pVmxTransient)); \
6159 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6160 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6161 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6162 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6163 Assert((a_pVmxTransient)->pVmcsInfo); \
6164 Assert(ASMIntAreEnabled()); \
6165 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6166 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
6167 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6168 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6169 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
6170 HMVMX_ASSERT_PREEMPT_CPUID(); \
6171 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6172 } while (0)
6173# else
6174# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
6175# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
6176# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6177 do { \
6178 AssertPtr((a_pVCpu)); \
6179 AssertPtr((a_pVmxTransient)); \
6180 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6181 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6182 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6183 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6184 Assert((a_pVmxTransient)->pVmcsInfo); \
6185 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6186 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6187 } while (0)
6188# endif
6189
6190# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6191 do { \
6192 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
6193 Assert((a_pVmxTransient)->fIsNestedGuest); \
6194 } while (0)
6195
6196# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6197 do { \
6198 Log4Func(("\n")); \
6199 } while (0)
6200#else
6201# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6202 do { \
6203 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6204 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
6205 } while (0)
6206
6207# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6208 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
6209
6210# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
6211#endif
6212
6213#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6214/** Macro that performs the necessary privilege checks for VM-exits that occurred
6215 * due to the guest attempting to execute a VMX instruction. */
6216# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
6217 do \
6218 { \
6219 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
6220 if (rcStrictTmp == VINF_SUCCESS) \
6221 { /* likely */ } \
6222 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6223 { \
6224 Assert((a_pVCpu)->hm.s.Event.fPending); \
6225 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
6226 return VINF_SUCCESS; \
6227 } \
6228 else \
6229 { \
6230 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
6231 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
6232 } \
6233 } while (0)
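/* Illustrative usage sketch (hedged; abbreviated, not verbatim from any specific exit
 * handler in this file): a VMX-instruction VM-exit handler would typically run this
 * check first, before decoding operands and deferring the rest to IEM, e.g.:
 *
 *     HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
 *     // ...read exit instruction info/qualification, then hand off to IEM...
 */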
6234
6235/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
6236# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
6237 do \
6238 { \
6239 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
6240 (a_pGCPtrEffAddr)); \
6241 if (rcStrictTmp == VINF_SUCCESS) \
6242 { /* likely */ } \
6243 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6244 { \
6245 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
6246 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
6247 NOREF(uXcptTmp); \
6248 return VINF_SUCCESS; \
6249 } \
6250 else \
6251 { \
6252 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
6253 return rcStrictTmp; \
6254 } \
6255 } while (0)
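/* Illustrative usage sketch (hedged; not verbatim from any specific exit handler):
 * decoding the memory operand of, say, a VMPTRLD intercept might look like:
 *
 *     RTGCPTR GCPtrVmcs;
 *     HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual,
 *                              VMXMEMACCESS_READ, &GCPtrVmcs);
 *     // GCPtrVmcs now holds the effective guest-linear address of the VMCS-pointer operand.
 */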
6256#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6257
6258
6259/**
6260 * Advances the guest RIP by the specified number of bytes.
6261 *
6262 * @param pVCpu The cross context virtual CPU structure.
6263 * @param cbInstr Number of bytes to advance the RIP by.
6264 *
6265 * @remarks No-long-jump zone!!!
6266 */
6267DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
6268{
6269 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
6270
6271 /*
6272 * Advance RIP.
6273 *
6274 * The upper 32 bits are only set when in 64-bit mode, so we have to detect
6275 * when the addition causes a "carry" into the upper half and check whether
6276 * we're in 64-bit mode and can go on with it or whether we should zap the top
6277 * half. (Note! The 8086, 80186 and 80286 emulation is done exclusively in
6278 * IEM, so we don't need to bother with pre-386 16-bit wraparound.)
6279 *
6280 * See PC wrap around tests in bs3-cpu-weird-1.
6281 */
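    /*
     * Worked example (hypothetical values, hedged; not part of the real flow): a
     * 32-bit guest sitting just below the 4G boundary shows why the XOR test below
     * catches the carry into bit 32:
     *
     *     uint64_t const uPrev  = UINT64_C(0x00000000fffffffe);            // EIP near 4G
     *     uint64_t const uNext  = uPrev + 2;                               // 0x0000000100000000
     *     bool const     fCarry = RT_BOOL((uNext ^ uPrev) & RT_BIT_64(32)); // true -> zap the top half
     */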
6282 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
6283 uint64_t const uRipNext = uRipPrev + cbInstr;
6284 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & RT_BIT_64(32))
6285 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx)))
6286 pVCpu->cpum.GstCtx.rip = uRipNext;
6287 else
6288 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
6289
6290 /*
6291 * Clear RF and interrupt shadowing.
6292 */
6293 if (RT_LIKELY(!(pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF))))
6294 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_INHIBIT_SHADOW;
6295 else
6296 {
6297 if ((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF)) == X86_EFL_TF)
6298 {
6299 /** @todo \#DB - single step. */
6300 }
6301 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
6302 }
6303 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
6304
6305 /* Mark both RIP and RFLAGS as updated. */
6306 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6307}
6308
6309
6310/**
6311 * Advances the guest RIP after reading it from the VMCS.
6312 *
6313 * @returns VBox status code, no informational status codes.
6314 * @param pVCpu The cross context virtual CPU structure.
6315 * @param pVmxTransient The VMX-transient structure.
6316 *
6317 * @remarks No-long-jump zone!!!
6318 */
6319static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6320{
6321 vmxHCReadToTransientSlow<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
6322 /** @todo consider template here after checking callers. */
6323 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
6324 AssertRCReturn(rc, rc);
6325
6326 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
6327 return VINF_SUCCESS;
6328}
6329
6330
6331/**
6332 * Handle a condition that occurred while delivering an event through the guest or
6333 * nested-guest IDT.
6334 *
6335 * @returns Strict VBox status code (i.e. informational status codes too).
6336 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6337 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
6338 * to continue execution of the guest which will deliver the \#DF.
6339 * @retval VINF_EM_RESET if we detected a triple-fault condition.
6340 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
6341 *
6342 * @param pVCpu The cross context virtual CPU structure.
6343 * @param pVmxTransient The VMX-transient structure.
6344 *
6345 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6346 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
6347 * is due to an EPT violation, PML full or SPP-related event.
6348 *
6349 * @remarks No-long-jump zone!!!
6350 */
6351static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6352{
6353 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6354 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6355 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6356 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6357 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6358 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
6359
6360 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
6361 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6362 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
6363 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
6364 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
6365 {
6366 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
6367 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
6368
6369 /*
6370 * If the event was a software interrupt (generated with INT n) or a software exception
6371 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
6372 * can handle the VM-exit and continue guest execution which will re-execute the
6373 * instruction rather than re-injecting the exception, as that can cause premature
6374 * trips to ring-3 before injection and involve TRPM which currently has no way of
6375 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
6376 * the problem).
6377 */
6378 IEMXCPTRAISE enmRaise;
6379 IEMXCPTRAISEINFO fRaiseInfo;
6380 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6381 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6382 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6383 {
6384 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
6385 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6386 }
6387 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
6388 {
6389 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
6390 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
6391 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
6392
6393 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
6394 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
6395
6396 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
6397
6398 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
6399 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
6400 {
6401 pVmxTransient->fVectoringPF = true;
6402 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6403 }
6404 }
6405 else
6406 {
6407 /*
6408 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
6409 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
6410 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
6411 */
6412 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6413 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6414 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
6415 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6416 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6417 }
6418
6419 /*
6420 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
6421 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
6422 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
6423 * subsequent VM-entry would fail, see @bugref{7445}.
6424 *
6425 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
6426 */
6427 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6428 && enmRaise == IEMXCPTRAISE_PREV_EVENT
6429 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6430 && CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6431 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6432
6433 switch (enmRaise)
6434 {
6435 case IEMXCPTRAISE_CURRENT_XCPT:
6436 {
6437 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
6438 Assert(rcStrict == VINF_SUCCESS);
6439 break;
6440 }
6441
6442 case IEMXCPTRAISE_PREV_EVENT:
6443 {
6444 uint32_t u32ErrCode;
6445 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
6446 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
6447 else
6448 u32ErrCode = 0;
6449
6450 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
6451 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
6452 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
6453 pVCpu->cpum.GstCtx.cr2);
6454
6455 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6456 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
6457 Assert(rcStrict == VINF_SUCCESS);
6458 break;
6459 }
6460
6461 case IEMXCPTRAISE_REEXEC_INSTR:
6462 Assert(rcStrict == VINF_SUCCESS);
6463 break;
6464
6465 case IEMXCPTRAISE_DOUBLE_FAULT:
6466 {
6467 /*
6468 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
6469 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
6470 */
6471 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6472 {
6473 pVmxTransient->fVectoringDoublePF = true;
6474 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6475 pVCpu->cpum.GstCtx.cr2));
6476 rcStrict = VINF_SUCCESS;
6477 }
6478 else
6479 {
6480 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
6481 vmxHCSetPendingXcptDF(pVCpu);
6482 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6483 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6484 rcStrict = VINF_HM_DOUBLE_FAULT;
6485 }
6486 break;
6487 }
6488
6489 case IEMXCPTRAISE_TRIPLE_FAULT:
6490 {
6491 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
6492 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6493 rcStrict = VINF_EM_RESET;
6494 break;
6495 }
6496
6497 case IEMXCPTRAISE_CPU_HANG:
6498 {
6499 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
6500 rcStrict = VERR_EM_GUEST_CPU_HANG;
6501 break;
6502 }
6503
6504 default:
6505 {
6506 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
6507 rcStrict = VERR_VMX_IPE_2;
6508 break;
6509 }
6510 }
6511 }
6512 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6513 && !CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6514 {
6515 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
6516 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
6517 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
6518 {
6519 /*
6520 * Execution of IRET caused a fault when NMI blocking was in effect (i.e we're in
6521 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6522 * that virtual NMIs remain blocked until the IRET execution is completed.
6523 *
6524 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
6525 */
6526 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6527 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6528 }
6529 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6530 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6531 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6532 {
6533 /*
6534 * Execution of IRET caused an EPT violation, page-modification log-full event or
6535 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
6536 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6537 * that virtual NMIs remain blocked until the IRET execution is completed.
6538 *
6539 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
6540 */
6541 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
6542 {
6543 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6544 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6545 }
6546 }
6547 }
6548
6549 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
6550 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
6551 return rcStrict;
6552}
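/*
 * Illustrative scenario for vmxHCCheckExitDueToEventDelivery() (hypothetical values,
 * hedged): an external interrupt was being delivered when the guest page walk faulted,
 * producing a #PF VM-exit, i.e. roughly:
 *
 *     uIdtVectorType  = VMX_IDT_VECTORING_INFO_TYPE_EXT_INT;   // say, vector 0x50
 *     uExitVectorType = VMX_EXIT_INT_INFO_TYPE_HW_XCPT;        // vector X86_XCPT_PF
 *
 * This combination resolves to IEMXCPTRAISE_PREV_EVENT, so the original interrupt is
 * queued again via vmxHCSetPendingEvent() and re-injected on the next VM-entry once
 * the #PF VM-exit itself has been handled.
 */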
6553
6554
6555#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6556/**
6557 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
6558 * guest attempting to execute a VMX instruction.
6559 *
6560 * @returns Strict VBox status code (i.e. informational status codes too).
6561 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6562 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6563 *
6564 * @param pVCpu The cross context virtual CPU structure.
6565 * @param uExitReason The VM-exit reason.
6566 *
6567 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
6568 * @remarks No-long-jump zone!!!
6569 */
6570static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
6571{
6572 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6573 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6574
6575 /*
6576 * The physical CPU would have already checked the CPU mode/code segment.
6577 * We shall just assert here for paranoia.
6578 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
6579 */
6580 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6581 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6582 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
6583
6584 if (uExitReason == VMX_EXIT_VMXON)
6585 {
6586 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6587
6588 /*
6589 * We check CR4.VMXE because it is required to be always set while in VMX operation
6590 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
6591 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6592 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6593 */
6594 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6595 {
6596 Log4Func(("CR4.VMXE is not set -> #UD\n"));
6597 vmxHCSetPendingXcptUD(pVCpu);
6598 return VINF_HM_PENDING_XCPT;
6599 }
6600 }
6601 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
6602 {
6603 /*
6604 * The guest has not entered VMX operation but attempted to execute a VMX instruction
6605 * (other than VMXON), so we need to raise a #UD.
6606 */
6607 Log4Func(("Not in VMX root mode -> #UD\n"));
6608 vmxHCSetPendingXcptUD(pVCpu);
6609 return VINF_HM_PENDING_XCPT;
6610 }
6611
6612 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
6613 return VINF_SUCCESS;
6614}
6615
6616
6617/**
6618 * Decodes the memory operand of an instruction that caused a VM-exit.
6619 *
6620 * The Exit qualification field provides the displacement field for memory
6621 * operand instructions, if any.
6622 *
6623 * @returns Strict VBox status code (i.e. informational status codes too).
6624 * @retval VINF_SUCCESS if the operand was successfully decoded.
6625 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6626 * operand.
6627 * @param pVCpu The cross context virtual CPU structure.
6628 * @param uExitInstrInfo The VM-exit instruction information field.
6629 * @param enmMemAccess The memory operand's access type (read or write).
6630 * @param GCPtrDisp The instruction displacement field, if any. For
6631 * RIP-relative addressing pass RIP + displacement here.
6632 * @param pGCPtrMem Where to store the effective destination memory address.
6633 *
6634 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6635 * virtual-8086 mode hence skips those checks while verifying if the
6636 * segment is valid.
6637 */
6638static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6639 PRTGCPTR pGCPtrMem)
6640{
6641 Assert(pGCPtrMem);
6642 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6643 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6644 | CPUMCTX_EXTRN_CR0);
6645
6646 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6647 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6648 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6649
6650 VMXEXITINSTRINFO ExitInstrInfo;
6651 ExitInstrInfo.u = uExitInstrInfo;
6652 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6653 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6654 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6655 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6656 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6657 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6658 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6659 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6660 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6661
6662 /*
6663 * Validate instruction information.
6664 * This shouldn't happen on real hardware but useful while testing our nested hardware-virtualization code.
6665 * This shouldn't happen on real hardware but it is useful while testing our nested hardware-virtualization code.
6666 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6667 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6668 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6669 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6670 AssertLogRelMsgReturn(fIsMemOperand,
6671 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6672
6673 /*
6674 * Compute the complete effective address.
6675 *
6676 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6677 * See AMD spec. 4.5.2 "Segment Registers".
6678 */
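    /*
     * Worked example (hypothetical operand, hedged): for something like
     * VMPTRLD [rbx + rsi*8 + 0x10] in 64-bit mode with a flat DS, the fields
     * combine as follows:
     *
     *     RTGCPTR GCPtrEff  = 0x10;                        // displacement (GCPtrDisp)
     *     GCPtrEff         += pVCpu->cpum.GstCtx.rbx;      // base register
     *     GCPtrEff         += pVCpu->cpum.GstCtx.rsi << 3; // index register scaled by 8
     *     // The segment base is only added for FS/GS in long mode; the result is then
     *     // masked by the address-size mask and checked for canonicality below.
     */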
6679 RTGCPTR GCPtrMem = GCPtrDisp;
6680 if (fBaseRegValid)
6681 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6682 if (fIdxRegValid)
6683 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6684
6685 RTGCPTR const GCPtrOff = GCPtrMem;
6686 if ( !fIsLongMode
6687 || iSegReg >= X86_SREG_FS)
6688 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6689 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6690
6691 /*
6692 * Validate effective address.
6693 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6694 */
6695 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6696 Assert(cbAccess > 0);
6697 if (fIsLongMode)
6698 {
6699 if (X86_IS_CANONICAL(GCPtrMem))
6700 {
6701 *pGCPtrMem = GCPtrMem;
6702 return VINF_SUCCESS;
6703 }
6704
6705 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6706 * "Data Limit Checks in 64-bit Mode". */
6707 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6708 vmxHCSetPendingXcptGP(pVCpu, 0);
6709 return VINF_HM_PENDING_XCPT;
6710 }
6711
6712 /*
6713 * This is a watered down version of iemMemApplySegment().
6714 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6715 * and segment CPL/DPL checks are skipped.
6716 */
6717 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6718 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6719 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6720
6721 /* Check if the segment is present and usable. */
6722 if ( pSel->Attr.n.u1Present
6723 && !pSel->Attr.n.u1Unusable)
6724 {
6725 Assert(pSel->Attr.n.u1DescType);
6726 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6727 {
6728 /* Check permissions for the data segment. */
6729 if ( enmMemAccess == VMXMEMACCESS_WRITE
6730 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6731 {
6732 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6733 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6734 return VINF_HM_PENDING_XCPT;
6735 }
6736
6737 /* Check limits if it's a normal data segment. */
6738 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6739 {
6740 if ( GCPtrFirst32 > pSel->u32Limit
6741 || GCPtrLast32 > pSel->u32Limit)
6742 {
6743 Log4Func(("Data segment limit exceeded. "
6744 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6745 GCPtrLast32, pSel->u32Limit));
6746 if (iSegReg == X86_SREG_SS)
6747 vmxHCSetPendingXcptSS(pVCpu, 0);
6748 else
6749 vmxHCSetPendingXcptGP(pVCpu, 0);
6750 return VINF_HM_PENDING_XCPT;
6751 }
6752 }
6753 else
6754 {
6755 /* Check limits if it's an expand-down data segment.
6756 Note! The upper boundary is defined by the B bit, not the G bit! */
6757 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6758 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6759 {
6760 Log4Func(("Expand-down data segment limit exceeded. "
6761 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6762 GCPtrLast32, pSel->u32Limit));
6763 if (iSegReg == X86_SREG_SS)
6764 vmxHCSetPendingXcptSS(pVCpu, 0);
6765 else
6766 vmxHCSetPendingXcptGP(pVCpu, 0);
6767 return VINF_HM_PENDING_XCPT;
6768 }
6769 }
6770 }
6771 else
6772 {
6773 /* Check permissions for the code segment. */
6774 if ( enmMemAccess == VMXMEMACCESS_WRITE
6775 || ( enmMemAccess == VMXMEMACCESS_READ
6776 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6777 {
6778 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6779 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6780 vmxHCSetPendingXcptGP(pVCpu, 0);
6781 return VINF_HM_PENDING_XCPT;
6782 }
6783
6784 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6785 if ( GCPtrFirst32 > pSel->u32Limit
6786 || GCPtrLast32 > pSel->u32Limit)
6787 {
6788 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6789 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6790 if (iSegReg == X86_SREG_SS)
6791 vmxHCSetPendingXcptSS(pVCpu, 0);
6792 else
6793 vmxHCSetPendingXcptGP(pVCpu, 0);
6794 return VINF_HM_PENDING_XCPT;
6795 }
6796 }
6797 }
6798 else
6799 {
6800 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6801 vmxHCSetPendingXcptGP(pVCpu, 0);
6802 return VINF_HM_PENDING_XCPT;
6803 }
6804
6805 *pGCPtrMem = GCPtrMem;
6806 return VINF_SUCCESS;
6807}
6808#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6809
6810
6811/**
6812 * VM-exit helper for LMSW.
6813 */
6814static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6815{
6816 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6817 AssertRCReturn(rc, rc);
6818
6819 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6820 AssertMsg( rcStrict == VINF_SUCCESS
6821 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6822
6823 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6824 if (rcStrict == VINF_IEM_RAISED_XCPT)
6825 {
6826 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6827 rcStrict = VINF_SUCCESS;
6828 }
6829
6830 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6831 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6832 return rcStrict;
6833}
6834
6835
6836/**
6837 * VM-exit helper for CLTS.
6838 */
6839static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6840{
6841 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6842 AssertRCReturn(rc, rc);
6843
6844 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6845 AssertMsg( rcStrict == VINF_SUCCESS
6846 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6847
6848 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6849 if (rcStrict == VINF_IEM_RAISED_XCPT)
6850 {
6851 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6852 rcStrict = VINF_SUCCESS;
6853 }
6854
6855 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6856 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6857 return rcStrict;
6858}
6859
6860
6861/**
6862 * VM-exit helper for MOV from CRx (CRx read).
6863 */
6864static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6865{
6866 Assert(iCrReg < 16);
6867 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6868
6869 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6870 AssertRCReturn(rc, rc);
6871
6872 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6873 AssertMsg( rcStrict == VINF_SUCCESS
6874 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6875
6876 if (iGReg == X86_GREG_xSP)
6877 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6878 else
6879 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6880#ifdef VBOX_WITH_STATISTICS
6881 switch (iCrReg)
6882 {
6883 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6884 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6885 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6886 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6887 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6888 }
6889#endif
6890 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6891 return rcStrict;
6892}
6893
6894
6895/**
6896 * VM-exit helper for MOV to CRx (CRx write).
6897 */
6898static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6899{
6900 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6901
6902 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6903 AssertMsg( rcStrict == VINF_SUCCESS
6904 || rcStrict == VINF_IEM_RAISED_XCPT
6905 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6906
6907 switch (iCrReg)
6908 {
6909 case 0:
6910 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6911 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6912 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6913 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6914 break;
6915
6916 case 2:
6917 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6918 /* Nothing to do here, CR2 is not part of the VMCS. */
6919 break;
6920
6921 case 3:
6922 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6923 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6924 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6925 break;
6926
6927 case 4:
6928 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6929 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6930#ifndef IN_NEM_DARWIN
6931 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6932 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6933#else
6934 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6935#endif
6936 break;
6937
6938 case 8:
6939 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6940 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6941 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6942 break;
6943
6944 default:
6945 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6946 break;
6947 }
6948
6949 if (rcStrict == VINF_IEM_RAISED_XCPT)
6950 {
6951 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6952 rcStrict = VINF_SUCCESS;
6953 }
6954 return rcStrict;
6955}
6956
6957
6958/**
6959 * VM-exit exception handler for \#PF (Page-fault exception).
6960 *
6961 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6962 */
6963static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6964{
6965 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6966 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
6967
6968#ifndef IN_NEM_DARWIN
6969 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6970 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6971 { /* likely */ }
6972 else
6973#endif
6974 {
6975#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6976 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6977#endif
6978 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6979 if (!pVmxTransient->fVectoringDoublePF)
6980 {
6981 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6982 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6983 }
6984 else
6985 {
6986 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6987 Assert(!pVmxTransient->fIsNestedGuest);
6988 vmxHCSetPendingXcptDF(pVCpu);
6989 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6990 }
6991 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6992 return VINF_SUCCESS;
6993 }
6994
6995 Assert(!pVmxTransient->fIsNestedGuest);
6996
6997 /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
6998 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6999 if (pVmxTransient->fVectoringPF)
7000 {
7001 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7002 return VINF_EM_RAW_INJECT_TRPM_EVENT;
7003 }
7004
7005 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7006 AssertRCReturn(rc, rc);
7007
7008 Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pVCpu->cpum.GstCtx.cs.Sel,
7009 pVCpu->cpum.GstCtx.rip, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pVCpu->cpum.GstCtx.cr3));
7010
7011 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
7012 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, &pVCpu->cpum.GstCtx, (RTGCPTR)pVmxTransient->uExitQual);
7013
7014 Log4Func(("#PF: rc=%Rrc\n", rc));
7015 if (rc == VINF_SUCCESS)
7016 {
7017 /*
7018 * This is typically a shadow page table sync or a MMIO instruction. But we may have
7019 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
7020 */
7021 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7022 TRPMResetTrap(pVCpu);
7023 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
7024 return rc;
7025 }
7026
7027 if (rc == VINF_EM_RAW_GUEST_TRAP)
7028 {
7029 if (!pVmxTransient->fVectoringDoublePF)
7030 {
7031 /* It's a guest page fault and needs to be reflected to the guest. */
7032 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
7033 TRPMResetTrap(pVCpu);
7034 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
7035 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
7036 uGstErrorCode, pVmxTransient->uExitQual);
7037 }
7038 else
7039 {
7040 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
7041 TRPMResetTrap(pVCpu);
7042 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
7043 vmxHCSetPendingXcptDF(pVCpu);
7044 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
7045 }
7046
7047 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
7048 return VINF_SUCCESS;
7049 }
7050
7051 TRPMResetTrap(pVCpu);
7052 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
7053 return rc;
7054}
7055
7056
7057/**
7058 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
7059 *
7060 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7061 */
7062static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7063{
7064 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7065 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
7066
7067 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR0>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7068 AssertRCReturn(rc, rc);
7069
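    /* Note: with CR0.NE clear the guest expects legacy external FPU error reporting
       (FERR# routed to IRQ 13 via the PIC) rather than having #MF delivered directly. */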
7070 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
7071 {
7072 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
7073 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
7074
7075 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
7076     *        provides VM-exit instruction length. If this causes problems later,
7077 * disassemble the instruction like it's done on AMD-V. */
7078 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7079 AssertRCReturn(rc2, rc2);
7080 return rc;
7081 }
7082
7083 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
7084 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7085 return VINF_SUCCESS;
7086}
7087
7088
7089/**
7090 * VM-exit exception handler for \#BP (Breakpoint exception).
7091 *
7092 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7093 */
7094static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7095{
7096 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7097 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
7098
7099 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7100 AssertRCReturn(rc, rc);
7101
7102 VBOXSTRICTRC rcStrict;
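    /* Give DBGF first crack at the breakpoint; if it is not a hypervisor breakpoint it returns
       VINF_EM_RAW_GUEST_TRAP and the #BP is reflected to the guest below. */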
7103 if (!pVmxTransient->fIsNestedGuest)
7104 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
7105 else
7106 rcStrict = VINF_EM_RAW_GUEST_TRAP;
7107
7108 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
7109 {
7110 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7111 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7112 rcStrict = VINF_SUCCESS;
7113 }
7114
7115 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
7116 return rcStrict;
7117}
7118
7119
7120/**
7121 * VM-exit exception handler for \#AC (Alignment-check exception).
7122 *
7123 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7124 */
7125static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7126{
7127 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7128
7129 /*
7130     * Detect #ACs caused by the host having enabled split-lock detection.
7131 * Emulate such instructions.
7132 */
7133#define VMX_HC_EXIT_XCPT_AC_INITIAL_REGS (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS)
7134 int rc = vmxHCImportGuestState<VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7135 AssertRCReturn(rc, rc);
7136 /** @todo detect split lock in cpu feature? */
7137 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
7138 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7139 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
7140 || CPUMGetGuestCPL(pVCpu) != 3
7141            /* 3. When EFLAGS.AC is zero this can only be a split-lock case. */
7142 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
7143 {
7144 /*
7145 * Check for debug/trace events and import state accordingly.
7146 */
7147 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
7148 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7149 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
7150#ifndef IN_NEM_DARWIN
7151 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
7152#endif
7153 )
7154 {
7155 if (pVM->cCpus == 1)
7156 {
7157#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7158 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK,
7159 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7160#else
7161 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7162 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7163#endif
7164 AssertRCReturn(rc, rc);
7165 }
7166 }
7167 else
7168 {
7169 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7170 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7171 AssertRCReturn(rc, rc);
7172
7173 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
7174
7175 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
7176 {
7177 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
7178 if (rcStrict != VINF_SUCCESS)
7179 return rcStrict;
7180 }
7181 }
7182
7183 /*
7184 * Emulate the instruction.
7185 *
7186 * We have to ignore the LOCK prefix here as we must not retrigger the
7187 * detection on the host. This isn't all that satisfactory, though...
7188 */
7189 if (pVM->cCpus == 1)
7190 {
7191 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
7192 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7193
7194 /** @todo For SMP configs we should do a rendezvous here. */
7195 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
7196 if (rcStrict == VINF_SUCCESS)
7197#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7198 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
7199 HM_CHANGED_GUEST_RIP
7200 | HM_CHANGED_GUEST_RFLAGS
7201 | HM_CHANGED_GUEST_GPRS_MASK
7202 | HM_CHANGED_GUEST_CS
7203 | HM_CHANGED_GUEST_SS);
7204#else
7205 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7206#endif
7207 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7208 {
7209 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7210 rcStrict = VINF_SUCCESS;
7211 }
7212 return rcStrict;
7213 }
7214 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
7215 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7216 return VINF_EM_EMULATE_SPLIT_LOCK;
7217 }
7218
7219 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
7220 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7221 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
7222
7223 /* Re-inject it. We'll detect any nesting before getting here. */
7224 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7225 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7226 return VINF_SUCCESS;
7227}
7228
7229
7230/**
7231 * VM-exit exception handler for \#DB (Debug exception).
7232 *
7233 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7234 */
7235static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7236{
7237 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7238 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
7239
7240 /*
7241     * Get the DR6-like values from the Exit qualification and pass them to DBGF for processing.
7242 */
7243 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
7244
7245 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
7246 uint64_t const uDR6 = X86_DR6_INIT_VAL
7247 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
7248 | X86_DR6_BD | X86_DR6_BS));
7249 Log6Func(("uDR6=%#RX64 uExitQual=%#RX64\n", uDR6, pVmxTransient->uExitQual));
7250
7251 int rc;
7252 if (!pVmxTransient->fIsNestedGuest)
7253 {
7254 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx, uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7255
7256 /*
7257 * Prevents stepping twice over the same instruction when the guest is stepping using
7258 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
7259 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
7260 */
7261 if ( rc == VINF_EM_DBG_STEPPED
7262 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
7263 {
7264 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7265 rc = VINF_EM_RAW_GUEST_TRAP;
7266 }
7267 }
7268 else
7269 rc = VINF_EM_RAW_GUEST_TRAP;
7270 Log6Func(("rc=%Rrc\n", rc));
7271 if (rc == VINF_EM_RAW_GUEST_TRAP)
7272 {
7273 /*
7274 * The exception was for the guest. Update DR6, DR7.GD and
7275 * IA32_DEBUGCTL.LBR before forwarding it.
7276 * See Intel spec. 27.1 "Architectural State before a VM-Exit"
7277 * and @sdmv3{077,622,17.2.3,Debug Status Register (DR6)}.
7278 */
7279#ifndef IN_NEM_DARWIN
7280 VMMRZCallRing3Disable(pVCpu);
7281 HM_DISABLE_PREEMPT(pVCpu);
7282
7283 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
7284 pVCpu->cpum.GstCtx.dr[6] |= uDR6;
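        /* If the guest's debug state is currently loaded on the CPU, update the hardware DR6 as well
           so it stays in sync with the saved guest context. */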
7285 if (CPUMIsGuestDebugStateActive(pVCpu))
7286 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
7287
7288 HM_RESTORE_PREEMPT();
7289 VMMRZCallRing3Enable(pVCpu);
7290#else
7291 /** @todo */
7292#endif
7293
7294 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7295 AssertRCReturn(rc, rc);
7296
7297 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
7298 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_GD;
7299
7300 /* Paranoia. */
7301 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
7302 pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_RA1_MASK;
7303
7304 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pVCpu->cpum.GstCtx.dr[7]);
7305 AssertRC(rc);
7306
7307 /*
7308 * Raise #DB in the guest.
7309 *
7310 * It is important to reflect exactly what the VM-exit gave us (preserving the
7311 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
7312 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
7313 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
7314 *
7315         * Intel re-documented ICEBP/INT1 in May 2018; it was previously documented only as part of
7316         * the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7317 */
7318 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7319 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7320 return VINF_SUCCESS;
7321 }
7322
7323 /*
7324     * Not a guest trap; it must be a hypervisor-related debug event then.
7325 * Update DR6 in case someone is interested in it.
7326 */
7327 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
7328 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
7329 CPUMSetHyperDR6(pVCpu, uDR6);
7330
7331 return rc;
7332}
7333
7334
7335/**
7336 * Hacks its way around the lovely mesa driver's backdoor accesses.
7337 *
7338 * @sa hmR0SvmHandleMesaDrvGp.
7339 */
7340static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7341{
7342 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
7343 RT_NOREF(pCtx);
7344
7345 /* For now we'll just skip the instruction. */
7346 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7347}
7348
7349
7350/**
7351 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
7352 * backdoor logging w/o checking what it is running inside.
7353 *
7354 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
7355 * backdoor port and magic numbers loaded in registers.
7356 *
7357 * @returns true if it is, false if it isn't.
7358 * @sa hmR0SvmIsMesaDrvGp.
7359 */
7360DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7361{
7362 /* 0xed: IN eAX,dx */
7363 uint8_t abInstr[1];
7364 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
7365 return false;
7366
7367 /* Check that it is #GP(0). */
7368 if (pVmxTransient->uExitIntErrorCode != 0)
7369 return false;
7370
7371 /* Check magic and port. */
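    /* These appear to be the VMware backdoor magic in EAX (0x564d5868, 'VMXh') and the backdoor
       I/O port in DX (0x5658, 'VX') that the mesa driver uses for its logging. */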
7372 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
7373 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
7374 if (pCtx->rax != UINT32_C(0x564d5868))
7375 return false;
7376 if (pCtx->dx != UINT32_C(0x5658))
7377 return false;
7378
7379 /* Flat ring-3 CS. */
7380 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
7381 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
7382 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
7383 if (pCtx->cs.Attr.n.u2Dpl != 3)
7384 return false;
7385 if (pCtx->cs.u64Base != 0)
7386 return false;
7387
7388 /* Check opcode. */
7389 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
7390 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
7391 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
7392 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
7393 if (RT_FAILURE(rc))
7394 return false;
7395 if (abInstr[0] != 0xed)
7396 return false;
7397
7398 return true;
7399}
7400
7401
7402/**
7403 * VM-exit exception handler for \#GP (General-protection exception).
7404 *
7405 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7406 */
7407static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7408{
7409 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7410 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
7411
7412 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7413 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7414#ifndef IN_NEM_DARWIN
7415 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
7416 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
7417 { /* likely */ }
7418 else
7419#endif
7420 {
7421#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7422# ifndef IN_NEM_DARWIN
7423 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7424# else
7425 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7426# endif
7427#endif
7428 /*
7429 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
7430 * executing a nested-guest, reflect #GP to the guest or nested-guest.
7431 */
7432 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7433 AssertRCReturn(rc, rc);
7434 Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
7435 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
7436
7437 if ( pVmxTransient->fIsNestedGuest
7438 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
7439 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
7440 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7441 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7442 else
7443 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
7444 return rc;
7445 }
7446
7447#ifndef IN_NEM_DARWIN
7448 Assert(CPUMIsGuestInRealModeEx(pCtx));
7449 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
7450 Assert(!pVmxTransient->fIsNestedGuest);
7451
7452 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7453 AssertRCReturn(rc, rc);
7454
7455 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7456 if (rcStrict == VINF_SUCCESS)
7457 {
7458 if (!CPUMIsGuestInRealModeEx(pCtx))
7459 {
7460 /*
7461 * The guest is no longer in real-mode, check if we can continue executing the
7462 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
7463 */
7464 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
7465 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
7466 {
7467 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
7468 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7469 }
7470 else
7471 {
7472 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
7473 rcStrict = VINF_EM_RESCHEDULE;
7474 }
7475 }
7476 else
7477 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7478 }
7479 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7480 {
7481 rcStrict = VINF_SUCCESS;
7482 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7483 }
7484 return VBOXSTRICTRC_VAL(rcStrict);
7485#endif
7486}
7487
7488
7489/**
7490 * VM-exit exception handler for \#DE (Divide Error).
7491 *
7492 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7493 */
7494static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7495{
7496 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7497 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
7498
7499 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7500 AssertRCReturn(rc, rc);
7501
7502 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
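    /* Give GCM the first shot at the #DE; presumably this is the guest-compatibility fixup path
       (e.g. division overflows in ancient guests running on fast host CPUs). */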
7503 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
7504 {
7505 uint8_t cbInstr = 0;
7506 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
7507 if (rc2 == VINF_SUCCESS)
7508 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
7509 else if (rc2 == VERR_NOT_FOUND)
7510 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
7511 else
7512 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
7513 }
7514 else
7515 rcStrict = VINF_SUCCESS; /* Do nothing. */
7516
7517 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
7518 if (RT_FAILURE(rcStrict))
7519 {
7520 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7521 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7522 rcStrict = VINF_SUCCESS;
7523 }
7524
7525 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
7526 return VBOXSTRICTRC_VAL(rcStrict);
7527}
7528
7529
7530/**
7531 * VM-exit exception handler wrapper for all other exceptions that are not handled
7532 * by a specific handler.
7533 *
7534 * This simply re-injects the exception back into the VM without any special
7535 * processing.
7536 *
7537 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7538 */
7539static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7540{
7541 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7542
7543#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7544# ifndef IN_NEM_DARWIN
7545 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7546 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
7547 ("uVector=%#x u32XcptBitmap=%#X32\n",
7548 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
7549 NOREF(pVmcsInfo);
7550# endif
7551#endif
7552
7553 /*
7554 * Re-inject the exception into the guest. This cannot be a double-fault condition which
7555 * would have been handled while checking exits due to event delivery.
7556 */
7557 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7558
7559#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7560 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7561 AssertRCReturn(rc, rc);
7562 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7563#endif
7564
7565#ifdef VBOX_WITH_STATISTICS
7566 switch (uVector)
7567 {
7568 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
7569 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
7570 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
7571 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7572 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
7573 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
7574 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7575 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
7576 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
7577 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
7578 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
7579 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
7580 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
7581 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
7582 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
7583 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
7584 default:
7585 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
7586 break;
7587 }
7588#endif
7589
7590    /* We should never call this function for a page-fault; otherwise we'd need to pass on the fault address below. */
7591 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
7592 NOREF(uVector);
7593
7594 /* Re-inject the original exception into the guest. */
7595 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7596 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7597 return VINF_SUCCESS;
7598}
7599
7600
7601/**
7602 * VM-exit exception handler for all exceptions (except NMIs!).
7603 *
7604 * @remarks This may be called for both guests and nested-guests. Take care to not
7605 * make assumptions and avoid doing anything that is not relevant when
7606 * executing a nested-guest (e.g., Mesa driver hacks).
7607 */
7608static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7609{
7610 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
7611
7612 /*
7613 * If this VM-exit occurred while delivering an event through the guest IDT, take
7614 * action based on the return code and additional hints (e.g. for page-faults)
7615 * that will be updated in the VMX transient structure.
7616 */
7617 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
7618 if (rcStrict == VINF_SUCCESS)
7619 {
7620 /*
7621 * If an exception caused a VM-exit due to delivery of an event, the original
7622 * event may have to be re-injected into the guest. We shall reinject it and
7623 * continue guest execution. However, page-fault is a complicated case and
7624 * needs additional processing done in vmxHCExitXcptPF().
7625 */
7626 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7627 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7628 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
7629 || uVector == X86_XCPT_PF)
7630 {
7631 switch (uVector)
7632 {
7633 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7634 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7635 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7636 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7637 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7638 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7639 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7640 default:
7641 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7642 }
7643 }
7644 /* else: inject pending event before resuming guest execution. */
7645 }
7646 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7647 {
7648 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7649 rcStrict = VINF_SUCCESS;
7650 }
7651
7652 return rcStrict;
7653}
7654/** @} */
7655
7656
7657/** @name VM-exit handlers.
7658 * @{
7659 */
7660/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7661/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7662/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7663
7664/**
7665 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7666 */
7667HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7668{
7669 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7670 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7671
7672#ifndef IN_NEM_DARWIN
7673 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7674 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7675 return VINF_SUCCESS;
7676 return VINF_EM_RAW_INTERRUPT;
7677#else
7678 return VINF_SUCCESS;
7679#endif
7680}
7681
7682
7683/**
7684 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7685 * VM-exit.
7686 */
7687HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7688{
7689 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7690 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7691
7692 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
7693
7694 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7695 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7696 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7697
7698 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7699 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7700 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7701 NOREF(pVmcsInfo);
7702
7703 VBOXSTRICTRC rcStrict;
7704 switch (uExitIntType)
7705 {
7706#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7707 /*
7708 * Host physical NMIs:
7709 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7710 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7711 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7712 *
7713 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7714 * See Intel spec. 27.5.5 "Updating Non-Register State".
7715 */
7716 case VMX_EXIT_INT_INFO_TYPE_NMI:
7717 {
7718 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7719 break;
7720 }
7721#endif
7722
7723 /*
7724 * Privileged software exceptions (#DB from ICEBP),
7725 * Software exceptions (#BP and #OF),
7726 * Hardware exceptions:
7727 * Process the required exceptions and resume guest execution if possible.
7728 */
7729 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7730 Assert(uVector == X86_XCPT_DB);
7731 RT_FALL_THRU();
7732 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7733 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7734 RT_FALL_THRU();
7735 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7736 {
7737 NOREF(uVector);
7738 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
7739 | HMVMX_READ_EXIT_INSTR_LEN
7740 | HMVMX_READ_IDT_VECTORING_INFO
7741 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
7742 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7743 break;
7744 }
7745
7746 default:
7747 {
7748 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7749 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7750 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7751 break;
7752 }
7753 }
7754
7755 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7756 return rcStrict;
7757}
7758
7759
7760/**
7761 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7762 */
7763HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7764{
7765 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7766
7767    /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7768 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7769 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7770
7771 /* Evaluate and deliver pending events and resume guest execution. */
7772 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7773 return VINF_SUCCESS;
7774}
7775
7776
7777/**
7778 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7779 */
7780HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7781{
7782 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7783
7784 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7785 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7786 {
7787 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7788 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7789 }
7790
7791 Assert(!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx));
7792
7793 /*
7794 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7795 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7796 */
7797 uint32_t fIntrState;
7798 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7799 AssertRC(rc);
7800 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7801 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7802 {
7803 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
7804
7805 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7806 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7807 AssertRC(rc);
7808 }
7809
7810    /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7811 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7812
7813 /* Evaluate and deliver pending events and resume guest execution. */
7814 return VINF_SUCCESS;
7815}
7816
7817
7818/**
7819 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7820 */
7821HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7822{
7823 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7824 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7825}
7826
7827
7828/**
7829 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7830 */
7831HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7832{
7833 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7834 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7835}
7836
7837
7838/**
7839 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7840 */
7841HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7842{
7843 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7844
7845 /*
7846 * Get the state we need and update the exit history entry.
7847 */
7848 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7849 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7850 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7851 AssertRCReturn(rc, rc);
7852
7853 VBOXSTRICTRC rcStrict;
7854 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7855 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7856 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
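    /* A non-NULL exit record means this CS:RIP is exiting frequently enough to be worth probing,
       so we take the EMHistoryExec path below instead of plain instruction emulation. */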
7857 if (!pExitRec)
7858 {
7859 /*
7860 * Regular CPUID instruction execution.
7861 */
7862 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7863 if (rcStrict == VINF_SUCCESS)
7864 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7865 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7866 {
7867 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7868 rcStrict = VINF_SUCCESS;
7869 }
7870 }
7871 else
7872 {
7873 /*
7874 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7875 */
7876 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7877 IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7878 AssertRCReturn(rc2, rc2);
7879
7880 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7881 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7882
7883 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7884 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7885
7886 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7887 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7888 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7889 }
7890 return rcStrict;
7891}
7892
7893
7894/**
7895 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7896 */
7897HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7898{
7899 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7900
7901 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7902 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7903 AssertRCReturn(rc, rc);
7904
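    /* GETSEC raises #UD when CR4.SMXE is zero, so a GETSEC VM-exit can only legitimately occur with SMXE set. */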
7905 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7906 return VINF_EM_RAW_EMULATE_INSTR;
7907
7908 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7909 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7910}
7911
7912
7913/**
7914 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7915 */
7916HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7917{
7918 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7919
7920 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7921 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7922 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7923 AssertRCReturn(rc, rc);
7924
7925 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7926 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7927 {
7928 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7929 we must reset offsetting on VM-entry. See @bugref{6634}. */
7930 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7931 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7932 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7933 }
7934 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7935 {
7936 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7937 rcStrict = VINF_SUCCESS;
7938 }
7939 return rcStrict;
7940}
7941
7942
7943/**
7944 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7945 */
7946HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7947{
7948 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7949
7950 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7951 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7952 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX>(pVCpu, pVmcsInfo, __FUNCTION__);
7953 AssertRCReturn(rc, rc);
7954
7955 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7956 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7957 {
7958 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7959 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7960 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7961 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7962 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7963 }
7964 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7965 {
7966 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7967 rcStrict = VINF_SUCCESS;
7968 }
7969 return rcStrict;
7970}
7971
7972
7973/**
7974 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7975 */
7976HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7977{
7978 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7979
7980 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7981 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7982 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7983 AssertRCReturn(rc, rc);
7984
7985 VBOXSTRICTRC rcStrict = IEMExecDecodedRdpmc(pVCpu, pVmxTransient->cbExitInstr);
7986 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7987 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7988 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7989 {
7990 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7991 rcStrict = VINF_SUCCESS;
7992 }
7993 return rcStrict;
7994}
7995
7996
7997/**
7998 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7999 */
8000HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8001{
8002 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8003
8004 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
8005 if (EMAreHypercallInstructionsEnabled(pVCpu))
8006 {
8007 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8008 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RIP
8009 | CPUMCTX_EXTRN_RFLAGS
8010 | CPUMCTX_EXTRN_CR0
8011 | CPUMCTX_EXTRN_SS
8012 | CPUMCTX_EXTRN_CS
8013 | CPUMCTX_EXTRN_EFER>(pVCpu, pVmcsInfo, __FUNCTION__);
8014 AssertRCReturn(rc, rc);
8015
8016 /* Perform the hypercall. */
8017 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
8018 if (rcStrict == VINF_SUCCESS)
8019 {
8020 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8021 AssertRCReturn(rc, rc);
8022 }
8023 else
8024 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
8025 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
8026 || RT_FAILURE(rcStrict));
8027
8028 /* If the hypercall changes anything other than guest's general-purpose registers,
8029 we would need to reload the guest changed bits here before VM-entry. */
8030 }
8031 else
8032 Log4Func(("Hypercalls not enabled\n"));
8033
8034 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
8035 if (RT_FAILURE(rcStrict))
8036 {
8037 vmxHCSetPendingXcptUD(pVCpu);
8038 rcStrict = VINF_SUCCESS;
8039 }
8040
8041 return rcStrict;
8042}
8043
8044
8045/**
8046 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
8047 */
8048HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8049{
8050 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8051#ifndef IN_NEM_DARWIN
8052 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
8053#endif
8054
8055 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8056 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8057 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8058 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8059 AssertRCReturn(rc, rc);
8060
8061 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
8062
8063 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
8064 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8065 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8066 {
8067 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8068 rcStrict = VINF_SUCCESS;
8069 }
8070 else
8071 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
8072 VBOXSTRICTRC_VAL(rcStrict)));
8073 return rcStrict;
8074}
8075
8076
8077/**
8078 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
8079 */
8080HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8081{
8082 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8083
8084 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8085 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8086 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS>(pVCpu, pVmcsInfo, __FUNCTION__);
8087 AssertRCReturn(rc, rc);
8088
8089 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
8090 if (rcStrict == VINF_SUCCESS)
8091 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8092 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8093 {
8094 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8095 rcStrict = VINF_SUCCESS;
8096 }
8097
8098 return rcStrict;
8099}
8100
8101
8102/**
8103 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
8104 */
8105HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8106{
8107 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8108
8109 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8110 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8111 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8112 AssertRCReturn(rc, rc);
8113
8114 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
8115 if (RT_SUCCESS(rcStrict))
8116 {
8117 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
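        /* IEM may have returned VINF_EM_HALT; let EM decide whether the monitor-wait can be continued
           (e.g. a wake-up condition is already pending) and resume the guest if so. */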
8118 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
8119 rcStrict = VINF_SUCCESS;
8120 }
8121
8122 return rcStrict;
8123}
8124
8125
8126/**
8127 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
8128 * VM-exit.
8129 */
8130HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8131{
8132 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8133 return VINF_EM_RESET;
8134}
8135
8136
8137/**
8138 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
8139 */
8140HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8141{
8142 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8143
8144 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8145 AssertRCReturn(rc, rc);
8146
8147 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
8148 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
8149 rc = VINF_SUCCESS;
8150 else
8151 rc = VINF_EM_HALT;
8152
8153 if (rc != VINF_SUCCESS)
8154 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
8155 return rc;
8156}
8157
8158
8159#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8160/**
8161 * VM-exit handler for instructions that result in a \#UD exception delivered to
8162 * the guest.
8163 */
8164HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8165{
8166 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8167 vmxHCSetPendingXcptUD(pVCpu);
8168 return VINF_SUCCESS;
8169}
8170#endif
8171
8172
8173/**
8174 * VM-exit handler for expiry of the VMX-preemption timer.
8175 */
8176HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8177{
8178 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8179
8180 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
8181 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8182    Log12(("vmxHCExitPreemptTimer:\n"));
8183
8184 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
8185 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8186 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
8187 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
8188 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
8189}
8190
8191
8192/**
8193 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
8194 */
8195HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8196{
8197 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8198
8199 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8200 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8201 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
8202 AssertRCReturn(rc, rc);
8203
8204 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
8205 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
8206 : HM_CHANGED_RAISED_XCPT_MASK);
8207
8208#ifndef IN_NEM_DARWIN
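    /* Re-evaluate whether XCR0 must be swapped on world switches: only needed when the guest has
       OSXSAVE enabled and its XCR0 differs from the current host value. */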
8209 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8210 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
8211 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
8212 {
8213 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
8214 hmR0VmxUpdateStartVmFunction(pVCpu);
8215 }
8216#endif
8217
8218 return rcStrict;
8219}
8220
8221
8222/**
8223 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
8224 */
8225HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8226{
8227 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8228
8229    /** @todo Enable the new code after finding a reliable guest test-case. */
8230#if 1
8231 return VERR_EM_INTERPRETER;
8232#else
8233 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8234 | HMVMX_READ_EXIT_INSTR_INFO
8235 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8236 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8237 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8238 AssertRCReturn(rc, rc);
8239
8240 /* Paranoia. Ensure this has a memory operand. */
8241 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
8242
8243 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
8244 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
8245 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
8246 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
8247
8248 RTGCPTR GCPtrDesc;
8249 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
8250
8251 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
8252 GCPtrDesc, uType);
8253 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8254 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8255 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8256 {
8257 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8258 rcStrict = VINF_SUCCESS;
8259 }
8260 return rcStrict;
8261#endif
8262}
8263
8264
8265/**
8266 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
8267 * VM-exit.
8268 */
8269HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8270{
8271 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8272 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8273 AssertRCReturn(rc, rc);
8274
8275 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
8276 if (RT_FAILURE(rc))
8277 return rc;
8278
8279 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
8280 NOREF(uInvalidReason);
8281
8282#ifdef VBOX_STRICT
8283 uint32_t fIntrState;
8284 uint64_t u64Val;
8285 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_INFO
8286 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8287 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
8288
8289 Log4(("uInvalidReason %u\n", uInvalidReason));
8290 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
8291 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
8292 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
8293
8294 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
8295 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
8296 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
8297 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
8298 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
8299 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
8300 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
8301    Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
8302 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
8303 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
8304 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
8305 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8306# ifndef IN_NEM_DARWIN
8307 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
8308 {
8309 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
8310 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
8311 }
8312
8313 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
8314# endif
8315#endif
8316
8317 return VERR_VMX_INVALID_GUEST_STATE;
8318}
8319
8320/**
8321 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
8322 */
8323HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8324{
8325 /*
8326 * Cumulative notes of all recognized but unexpected VM-exits.
8327 *
8328 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
8329 * nested-paging is used.
8330 *
8331     *    2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
8332 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
8333 * this function (and thereby stop VM execution) for handling such instructions.
8334 *
8335 *
8336 * VMX_EXIT_INIT_SIGNAL:
8337 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
8338 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these
8339     *    VM-exits. However, we should not receive INIT signal VM-exits while executing a VM.
8340 *
8341     *    See Intel spec. 33.14.1 "Default Treatment of SMI Delivery"
8342 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
8343 * See Intel spec. "23.8 Restrictions on VMX operation".
8344 *
8345 * VMX_EXIT_SIPI:
8346 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
8347 * activity state is used. We don't make use of it as our guests don't have direct
8348 * access to the host local APIC.
8349 *
8350 * See Intel spec. 25.3 "Other Causes of VM-exits".
8351 *
8352 * VMX_EXIT_IO_SMI:
8353 * VMX_EXIT_SMI:
8354 * This can only happen if we support dual-monitor treatment of SMI, which can be
8355 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
8356 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
8357 * VMX root mode or receive an SMI. If we get here, something funny is going on.
8358 *
8359 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
8360 * See Intel spec. 25.3 "Other Causes of VM-Exits"
8361 *
8362 * VMX_EXIT_ERR_MSR_LOAD:
8363     *    Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
8364     *    and typically indicate a bug in the hypervisor code. We thus cannot resume
8365     *    execution.
8366 *
8367 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
8368 *
8369 * VMX_EXIT_ERR_MACHINE_CHECK:
8370     *    Machine check exceptions indicate a fatal/unrecoverable hardware condition
8371     *    including but not limited to system bus, ECC, parity, cache and TLB errors. An
8372     *    abort-class #MC exception is raised. We thus cannot assume a
8373 * reasonable chance of continuing any sort of execution and we bail.
8374 *
8375 * See Intel spec. 15.1 "Machine-check Architecture".
8376 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
8377 *
8378 * VMX_EXIT_PML_FULL:
8379 * VMX_EXIT_VIRTUALIZED_EOI:
8380 * VMX_EXIT_APIC_WRITE:
8381 * We do not currently support any of these features and thus they are all unexpected
8382 * VM-exits.
8383 *
8384 * VMX_EXIT_GDTR_IDTR_ACCESS:
8385 * VMX_EXIT_LDTR_TR_ACCESS:
8386 * VMX_EXIT_RDRAND:
8387 * VMX_EXIT_RSM:
8388 * VMX_EXIT_VMFUNC:
8389 * VMX_EXIT_ENCLS:
8390 * VMX_EXIT_RDSEED:
8391 * VMX_EXIT_XSAVES:
8392 * VMX_EXIT_XRSTORS:
8393 * VMX_EXIT_UMWAIT:
8394 * VMX_EXIT_TPAUSE:
8395 * VMX_EXIT_LOADIWKEY:
8396 * These VM-exits are -not- caused unconditionally by execution of the corresponding
8397     *    instruction. Any VM-exit for these instructions indicates a hardware problem,
8398 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
8399 *
8400 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
8401 */
8402 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8403 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
8404 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8405}
8406
8407
8408/**
8409 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
8410 */
8411HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8412{
8413 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8414
8415 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8416
8417 /** @todo Optimize this: We currently drag in the whole MSR state
8418 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8419 * MSRs required. That would require changes to IEM and possibly CPUM too.
8420     *        (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8421 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8422 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8423 int rc;
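    /* MSR_K8_FS_BASE and MSR_K8_GS_BASE are not covered by CPUMCTX_EXTRN_ALL_MSRS, so the full
       FS/GS segment state is imported for those below. */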
8424 switch (idMsr)
8425 {
8426 default:
8427 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8428 __FUNCTION__);
8429 AssertRCReturn(rc, rc);
8430 break;
8431 case MSR_K8_FS_BASE:
8432 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8433 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8434 AssertRCReturn(rc, rc);
8435 break;
8436 case MSR_K8_GS_BASE:
8437 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8438 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8439 AssertRCReturn(rc, rc);
8440 break;
8441 }
8442
8443 Log4Func(("ecx=%#RX32\n", idMsr));
8444
8445#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8446 Assert(!pVmxTransient->fIsNestedGuest);
8447 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
8448 {
8449 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
8450 && idMsr != MSR_K6_EFER)
8451 {
8452 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
8453 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8454 }
8455 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8456 {
8457 Assert(pVmcsInfo->pvMsrBitmap);
8458 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8459 if (fMsrpm & VMXMSRPM_ALLOW_RD)
8460 {
8461 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
8462 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8463 }
8464 }
8465 }
8466#endif
8467
8468 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
8469 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
8470 if (rcStrict == VINF_SUCCESS)
8471 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8472 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8473 {
8474 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8475 rcStrict = VINF_SUCCESS;
8476 }
8477 else
8478 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
8479 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8480
8481 return rcStrict;
8482}
8483
8484
8485/**
8486 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
8487 */
8488HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8489{
8490 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8491
8492 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8493
8494 /*
8495 * The FS and GS base MSRs are not part of the all-MSRs mask (CPUMCTX_EXTRN_ALL_MSRS) used below.
8496 * Although we don't need to fetch the base itself as it will be overwritten shortly, loading
8497 * the guest state would also load the entire segment register, including limit and
8498 * attributes, so we need to import them here.
8499 */
8500 /** @todo Optimize this: We currently drag in the whole MSR state
8501 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to fetch only
8502 * the MSRs required. That would require changes to IEM and possibly CPUM too.
8503 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8504 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8505 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8506 int rc;
8507 switch (idMsr)
8508 {
8509 default:
8510 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8511 __FUNCTION__);
8512 AssertRCReturn(rc, rc);
8513 break;
8514
8515 case MSR_K8_FS_BASE:
8516 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8517 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8518 AssertRCReturn(rc, rc);
8519 break;
8520 case MSR_K8_GS_BASE:
8521 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8522 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8523 AssertRCReturn(rc, rc);
8524 break;
8525 }
8526 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
8527
8528 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
8529 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
8530
8531 if (rcStrict == VINF_SUCCESS)
8532 {
8533 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8534
8535 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
8536 if ( idMsr == MSR_IA32_APICBASE
8537 || ( idMsr >= MSR_IA32_X2APIC_START
8538 && idMsr <= MSR_IA32_X2APIC_END))
8539 {
8540 /*
8541 * We've already saved the APIC-related guest state (TPR) in the post-run phase.
8542 * When full APIC register virtualization is implemented we'll have to make
8543 * sure APIC state is saved from the VMCS before IEM changes it.
8544 */
8545 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
8546 }
8547 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
8548 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8549 else if (idMsr == MSR_K6_EFER)
8550 {
8551 /*
8552 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
8553 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
8554 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
8555 */
8556 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
8557 }
8558
8559 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
8560 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
8561 {
8562 switch (idMsr)
8563 {
8564 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
8565 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
8566 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
8567 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
8568 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
8569 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
8570 default:
8571 {
8572#ifndef IN_NEM_DARWIN
8573 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8574 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
8575 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8576 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
8577#else
8578 AssertMsgFailed(("TODO\n"));
8579#endif
8580 break;
8581 }
8582 }
8583 }
8584#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8585 else
8586 {
8587 /* Paranoia. Validate that MSRs given write pass-through in the MSR bitmap are not being intercepted. */
8588 switch (idMsr)
8589 {
8590 case MSR_IA32_SYSENTER_CS:
8591 case MSR_IA32_SYSENTER_EIP:
8592 case MSR_IA32_SYSENTER_ESP:
8593 case MSR_K8_FS_BASE:
8594 case MSR_K8_GS_BASE:
8595 {
8596 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
8597 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8598 }
8599
8600 /* Writes to MSRs in the auto-load/store area or to lazily swapped MSRs shouldn't cause VM-exits when MSR bitmaps are used. */
8601 default:
8602 {
8603 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8604 {
8605 /* EFER MSR writes are always intercepted. */
8606 if (idMsr != MSR_K6_EFER)
8607 {
8608 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
8609 idMsr));
8610 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8611 }
8612 }
8613
8614 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8615 {
8616 Assert(pVmcsInfo->pvMsrBitmap);
8617 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8618 if (fMsrpm & VMXMSRPM_ALLOW_WR)
8619 {
8620 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
8621 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8622 }
8623 }
8624 break;
8625 }
8626 }
8627 }
8628#endif /* VBOX_STRICT */
8629 }
8630 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8631 {
8632 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8633 rcStrict = VINF_SUCCESS;
8634 }
8635 else
8636 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
8637 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8638
8639 return rcStrict;
8640}
8641
8642
8643/**
8644 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8645 */
8646HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8647{
8648 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8649
8650 /** @todo The guest has likely hit a contended spinlock. We might want to
8651 * poke or schedule a different guest VCPU. */
8652 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8653 if (RT_SUCCESS(rc))
8654 return VINF_EM_RAW_INTERRUPT;
8655
8656 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8657 return rc;
8658}
8659
8660
8661/**
8662 * VM-exit handler for when the TPR value is lowered below the specified
8663 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8664 */
8665HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8666{
8667 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8668 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8669
8670 /*
8671 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8672 * We'll re-evaluate pending interrupts and inject them before the next VM
8673 * entry so we can just continue execution here.
8674 */
8675 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8676 return VINF_SUCCESS;
8677}
8678
8679
8680/**
8681 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8682 * VM-exit.
8683 *
8684 * @retval VINF_SUCCESS when guest execution can continue.
8685 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8686 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8687 * incompatible guest state for VMX execution (real-on-v86 case).
8688 */
8689HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8690{
8691 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8692 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8693
8694 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8695 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8696 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8697
8698 VBOXSTRICTRC rcStrict;
8699 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8700 uint64_t const uExitQual = pVmxTransient->uExitQual;
8701 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
8702 switch (uAccessType)
8703 {
8704 /*
8705 * MOV to CRx.
8706 */
8707 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8708 {
8709 /*
8710 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8711 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
8712 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8713 * PAE PDPTEs as well.
8714 */
8715 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8716 AssertRCReturn(rc, rc);
8717
8718 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8719#ifndef IN_NEM_DARWIN
8720 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8721#endif
8722 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8723 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8724
8725 /*
8726 * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8727 * - When nested paging isn't used.
8728 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8729 * - We are executing in the VM debug loop.
8730 */
8731#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8732# ifndef IN_NEM_DARWIN
8733 Assert( iCrReg != 3
8734 || !VM_IS_VMX_NESTED_PAGING(pVM)
8735 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8736 || pVCpu->hmr0.s.fUsingDebugLoop);
8737# else
8738 Assert( iCrReg != 3
8739 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8740# endif
8741#endif
8742
8743 /* MOV to CR8 only causes a VM-exit when the TPR shadow is not used. */
8744 Assert( iCrReg != 8
8745 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8746
8747 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8748 AssertMsg( rcStrict == VINF_SUCCESS
8749 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8750
8751#ifndef IN_NEM_DARWIN
8752 /*
8753 * This is a kludge for handling switches back to real mode when we try to use
8754 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8755 * deal with special selector values, so we have to return to ring-3 and run
8756 * there till the selector values are V86 mode compatible.
8757 *
8758 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8759 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8760 * this function.
8761 */
8762 if ( iCrReg == 0
8763 && rcStrict == VINF_SUCCESS
8764 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8765 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8766 && (uOldCr0 & X86_CR0_PE)
8767 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8768 {
8769 /** @todo Check selectors rather than returning all the time. */
8770 Assert(!pVmxTransient->fIsNestedGuest);
8771 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8772 rcStrict = VINF_EM_RESCHEDULE_REM;
8773 }
8774#endif
8775
8776 break;
8777 }
8778
8779 /*
8780 * MOV from CRx.
8781 */
8782 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8783 {
8784 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8785 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8786
8787 /*
8788 * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8789 * - When nested paging isn't used.
8790 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8791 * - We are executing in the VM debug loop.
8792 */
8793#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8794# ifndef IN_NEM_DARWIN
8795 Assert( iCrReg != 3
8796 || !VM_IS_VMX_NESTED_PAGING(pVM)
8797 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8798 || pVCpu->hmr0.s.fLeaveDone);
8799# else
8800 Assert( iCrReg != 3
8801 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8802# endif
8803#endif
8804
8805 /* MOV from CR8 only causes a VM-exit when the TPR shadow feature isn't enabled. */
8806 Assert( iCrReg != 8
8807 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8808
8809 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8810 break;
8811 }
8812
8813 /*
8814 * CLTS (Clear Task-Switch Flag in CR0).
8815 */
8816 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8817 {
8818 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8819 break;
8820 }
8821
8822 /*
8823 * LMSW (Load Machine-Status Word into CR0).
8824 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8825 */
8826 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8827 {
8828 RTGCPTR GCPtrEffDst;
8829 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8830 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8831 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
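            /* Note: LMSW only loads the low four CR0 bits (PE, MP, EM, TS). The 16-bit source
               value and the memory-operand flag come from the exit qualification, while for
               memory operands the linear address is read from the guest-linear-address field. */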
8832 if (fMemOperand)
8833 {
8834 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
8835 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8836 }
8837 else
8838 GCPtrEffDst = NIL_RTGCPTR;
8839 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8840 break;
8841 }
8842
8843 default:
8844 {
8845 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8846 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8847 }
8848 }
8849
8850 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8851 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8852 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8853
8854 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8855 NOREF(pVM);
8856 return rcStrict;
8857}
8858
8859
8860/**
8861 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8862 * VM-exit.
8863 */
8864HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8865{
8866 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8867 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8868
8869 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8870 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8871 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8872 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8873#define VMX_HC_EXIT_IO_INSTR_INITIAL_REGS (IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER)
8874 /* EFER MSR also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8875 int rc = vmxHCImportGuestState<VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8876 AssertRCReturn(rc, rc);
8877
8878 /* See Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
8879 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8880 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8881 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8882 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8883 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8884 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8885 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
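    /* Note: the size-of-access field in the exit qualification encodes 0 = 1 byte, 1 = 2 bytes
       and 3 = 4 bytes; 2 is not a defined encoding, hence the check above and the zero entries
       at index 2 in the lookup tables further down. */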
8886
8887 /*
8888 * Update exit history to see if this exit can be optimized.
8889 */
8890 VBOXSTRICTRC rcStrict;
8891 PCEMEXITREC pExitRec = NULL;
8892 if ( !fGstStepping
8893 && !fDbgStepping)
8894 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8895 !fIOString
8896 ? !fIOWrite
8897 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8898 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8899 : !fIOWrite
8900 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8901 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8902 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8903 if (!pExitRec)
8904 {
8905 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8906 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
8907
8908 uint32_t const cbValue = s_aIOSizes[uIOSize];
8909 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8910 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8911 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8912 if (fIOString)
8913 {
8914 /*
8915 * INS/OUTS - I/O String instruction.
8916 *
8917 * Use instruction-information if available, otherwise fall back on
8918 * interpreting the instruction.
8919 */
8920 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8921 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8922 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8923 if (fInsOutsInfo)
8924 {
8925 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
8926 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8927 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8928 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8929 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8930 if (fIOWrite)
8931 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8932 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8933 else
8934 {
8935 /*
8936 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8937 * Hence the "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8938 * See Intel Instruction spec. for "INS".
8939 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8940 */
8941 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8942 }
8943 }
8944 else
8945 rcStrict = IEMExecOne(pVCpu);
8946
8947 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8948 fUpdateRipAlready = true;
8949 }
8950 else
8951 {
8952 /*
8953 * IN/OUT - I/O instruction.
8954 */
8955 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8956 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8957 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8958 if (fIOWrite)
8959 {
8960 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8961 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8962#ifndef IN_NEM_DARWIN
8963 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8964 && !pCtx->eflags.Bits.u1TF)
8965 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8966#endif
8967 }
8968 else
8969 {
8970 uint32_t u32Result = 0;
8971 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8972 if (IOM_SUCCESS(rcStrict))
8973 {
8974 /* Save result of I/O IN instr. in AL/AX/EAX. */
8975 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8976 }
8977#ifndef IN_NEM_DARWIN
8978 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8979 && !pCtx->eflags.Bits.u1TF)
8980 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8981#endif
8982 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8983 }
8984 }
8985
8986 if (IOM_SUCCESS(rcStrict))
8987 {
8988 if (!fUpdateRipAlready)
8989 {
8990 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8991 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8992 }
8993
8994 /*
8995 * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault guru
8996 * meditation while booting a Fedora 17 64-bit guest.
8997 *
8998 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8999 */
9000 if (fIOString)
9001 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
9002
9003 /*
9004 * If any I/O breakpoints are armed, we need to check if one triggered
9005 * and take appropriate action.
9006 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
9007 */
9008#if 1
9009 AssertCompile(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7);
9010#else
9011 AssertCompile(!(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7));
9012 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo);
9013 AssertRCReturn(rc, rc);
9014#endif
9015
9016 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
9017 * execution engines about whether hyper BPs and such are pending. */
9018 uint32_t const uDr7 = pCtx->dr[7];
9019 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
9020 && X86_DR7_ANY_RW_IO(uDr7)
9021 && (pCtx->cr4 & X86_CR4_DE))
9022 || DBGFBpIsHwIoArmed(pVM)))
9023 {
9024 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
9025
9026#ifndef IN_NEM_DARWIN
9027 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
9028 VMMRZCallRing3Disable(pVCpu);
9029 HM_DISABLE_PREEMPT(pVCpu);
9030
9031 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
9032
9033 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
9034 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
9035 {
9036 /* Raise #DB. */
9037 if (fIsGuestDbgActive)
9038 ASMSetDR6(pCtx->dr[6]);
9039 if (pCtx->dr[7] != uDr7)
9040 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
9041
9042 vmxHCSetPendingXcptDB(pVCpu);
9043 }
9044 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST];
9045 however, we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
9046 else if ( rcStrict2 != VINF_SUCCESS
9047 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
9048 rcStrict = rcStrict2;
9049 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
9050
9051 HM_RESTORE_PREEMPT();
9052 VMMRZCallRing3Enable(pVCpu);
9053#else
9054 /** @todo */
9055#endif
9056 }
9057 }
9058
9059#ifdef VBOX_STRICT
9060 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
9061 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
9062 Assert(!fIOWrite);
9063 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
9064 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
9065 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
9066 Assert(fIOWrite);
9067 else
9068 {
9069# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
9070 * statuses, that the VMM device and some others may return. See
9071 * IOM_SUCCESS() for guidance. */
9072 AssertMsg( RT_FAILURE(rcStrict)
9073 || rcStrict == VINF_SUCCESS
9074 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
9075 || rcStrict == VINF_EM_DBG_BREAKPOINT
9076 || rcStrict == VINF_EM_RAW_GUEST_TRAP
9077 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9078# endif
9079 }
9080#endif
9081 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
9082 }
9083 else
9084 {
9085 /*
9086 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
9087 */
9088 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
9089 VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
9090 AssertRCReturn(rc2, rc2);
9091 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
9092 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
9093 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
9094 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9095 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
9096 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
9097
9098 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9099 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9100
9101 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9102 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9103 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9104 }
9105 return rcStrict;
9106}
9107
9108
9109/**
9110 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
9111 * VM-exit.
9112 */
9113HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9114{
9115 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9116
9117 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
9118 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
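    /* Only task switches initiated while delivering an event through the IDT need special
       handling here (the pending event is re-injected via TRPM); task switches from other
       sources (CALL, JMP, IRET) fall through to the interpreter below. */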
9119 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
9120 {
9121 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_INFO>(pVCpu, pVmxTransient);
9122 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9123 {
9124 uint32_t uErrCode;
9125 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
9126 {
9127 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9128 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
9129 }
9130 else
9131 uErrCode = 0;
9132
9133 RTGCUINTPTR GCPtrFaultAddress;
9134 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
9135 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
9136 else
9137 GCPtrFaultAddress = 0;
9138
9139 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9140
9141 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
9142 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
9143
9144 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
9145 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
9146 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9147 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9148 }
9149 }
9150
9151 /* Fall back to the interpreter to emulate the task-switch. */
9152 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9153 return VERR_EM_INTERPRETER;
9154}
9155
9156
9157/**
9158 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
9159 */
9160HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9161{
9162 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9163
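    /* The monitor-trap flag is normally only armed by our own single-stepping/debug-loop code,
       so simply disarm it again and report a debug-step event to the caller. */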
9164 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9165 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
9166 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9167 AssertRC(rc);
9168 return VINF_EM_DBG_STEPPED;
9169}
9170
9171
9172/**
9173 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
9174 */
9175HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9176{
9177 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9178 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
9179
9180 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9181 | HMVMX_READ_EXIT_INSTR_LEN
9182 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9183 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9184 | HMVMX_READ_IDT_VECTORING_INFO
9185 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9186
9187 /*
9188 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9189 */
9190 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9191 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9192 {
9193 /* For some crazy guests, if event delivery causes an APIC-access VM-exit, go to instruction emulation. */
9194 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
9195 {
9196 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9197 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9198 }
9199 }
9200 else
9201 {
9202 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9203 return rcStrict;
9204 }
9205
9206 /* IOMR0MmioPhysHandler() below may call into IEM, so save the necessary state. */
9207 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9208 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9209 AssertRCReturn(rc, rc);
9210
9211 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses". */
9212 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
9213 switch (uAccessType)
9214 {
9215#ifndef IN_NEM_DARWIN
9216 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
9217 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
9218 {
9219 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
9220 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
9221 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
9222
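            /* The exit qualification only reports the offset into the APIC-access page, so the
               faulting guest-physical address is reconstructed from the page-aligned APIC base
               plus that offset. */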
9223 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
9224 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
9225 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
9226 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
9227 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
9228
9229 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
9230 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
9231 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9232 if ( rcStrict == VINF_SUCCESS
9233 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9234 || rcStrict == VERR_PAGE_NOT_PRESENT)
9235 {
9236 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9237 | HM_CHANGED_GUEST_APIC_TPR);
9238 rcStrict = VINF_SUCCESS;
9239 }
9240 break;
9241 }
9242#else
9243 /** @todo */
9244#endif
9245
9246 default:
9247 {
9248 Log4Func(("uAccessType=%#x\n", uAccessType));
9249 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
9250 break;
9251 }
9252 }
9253
9254 if (rcStrict != VINF_SUCCESS)
9255 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
9256 return rcStrict;
9257}
9258
9259
9260/**
9261 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
9262 * VM-exit.
9263 */
9264HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9265{
9266 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9267 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9268
9269 /*
9270 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
9271 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
9272 * must emulate the MOV DRx access.
9273 */
9274 if (!pVmxTransient->fIsNestedGuest)
9275 {
9276 /* We should -not- get this VM-exit if the guest's debug registers were active. */
9277 if ( pVmxTransient->fWasGuestDebugStateActive
9278#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9279 && !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx
9280#endif
9281 )
9282 {
9283 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
9284 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
9285 }
9286
9287 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
9288 && !pVmxTransient->fWasHyperDebugStateActive)
9289 {
9290 Assert(!DBGFIsStepping(pVCpu));
9291 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
9292
9293 /* Whether we disable intercepting MOV DRx instructions and resume
9294 the current one, or emulate it and keep intercepting them is
9295 configurable. Though it usually comes down to whether there are
9296 any new DR6 & DR7 bits (RTM) we want to hide from the guest. */
9297#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9298 bool const fResumeInstruction = !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx;
9299#else
9300 bool const fResumeInstruction = true;
9301#endif
9302 if (fResumeInstruction)
9303 {
9304 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
9305 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9306 AssertRC(rc);
9307 }
9308
9309#ifndef IN_NEM_DARWIN
9310 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
9311 VMMRZCallRing3Disable(pVCpu);
9312 HM_DISABLE_PREEMPT(pVCpu);
9313
9314 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
9315 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
9316 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9317
9318 HM_RESTORE_PREEMPT();
9319 VMMRZCallRing3Enable(pVCpu);
9320#else
9321 CPUMR3NemActivateGuestDebugState(pVCpu);
9322 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9323 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
9324#endif
9325
9326 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
9327 if (fResumeInstruction)
9328 {
9329#ifdef VBOX_WITH_STATISTICS
9330 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9331 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9332 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9333 else
9334 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9335#endif
9336 return VINF_SUCCESS;
9337 }
9338 }
9339 }
9340
9341 /*
9342 * Import state. We must have DR7 loaded here as it's always consulted,
9343 * both for reading and writing. The other debug registers are never
9344 * exported as such.
9345 */
9346 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9347 int rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
9348 | CPUMCTX_EXTRN_GPRS_MASK
9349 | CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo, __FUNCTION__);
9350 AssertRCReturn(rc, rc);
9351
9352 uint8_t const iGReg = VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual);
9353 uint8_t const iDrReg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
9354 Log4Func(("cs:rip=%#04x:%08RX64 r%d %s dr%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iGReg,
9355 VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE ? "->" : "<-", iDrReg));
9356
9357 VBOXSTRICTRC rcStrict;
9358 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9359 {
9360 /*
9361 * Write DRx register.
9362 */
9363 rcStrict = IEMExecDecodedMovDRxWrite(pVCpu, pVmxTransient->cbExitInstr, iDrReg, iGReg);
9364 AssertMsg( rcStrict == VINF_SUCCESS
9365 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9366
9367 if (rcStrict == VINF_SUCCESS)
9368 {
9369 /** @todo r=bird: Not sure why we always flag DR7 as modified here, but I've
9370 * kept it for now to avoid breaking something non-obvious. */
9371 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9372 | HM_CHANGED_GUEST_DR7);
9373 /* Update the DR6 register if guest debug state is active, otherwise we'll
9374 trash it when calling CPUMR0DebugStateMaybeSaveGuestAndRestoreHost. */
9375 if (iDrReg == 6 && CPUMIsGuestDebugStateActive(pVCpu))
9376 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
9377 Log4Func(("r%d=%#RX64 => dr%d=%#RX64\n", iGReg, pVCpu->cpum.GstCtx.aGRegs[iGReg].u,
9378 iDrReg, pVCpu->cpum.GstCtx.dr[iDrReg]));
9379 }
9380 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9381 {
9382 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9383 rcStrict = VINF_SUCCESS;
9384 }
9385
9386 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9387 }
9388 else
9389 {
9390 /*
9391 * Read DRx register into a general purpose register.
9392 */
9393 rcStrict = IEMExecDecodedMovDRxRead(pVCpu, pVmxTransient->cbExitInstr, iGReg, iDrReg);
9394 AssertMsg( rcStrict == VINF_SUCCESS
9395 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9396
9397 if (rcStrict == VINF_SUCCESS)
9398 {
9399 if (iGReg == X86_GREG_xSP)
9400 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9401 | HM_CHANGED_GUEST_RSP);
9402 else
9403 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9404 }
9405 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9406 {
9407 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9408 rcStrict = VINF_SUCCESS;
9409 }
9410
9411 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9412 }
9413
9414 return rcStrict;
9415}
9416
9417
9418/**
9419 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
9420 * Conditional VM-exit.
9421 */
9422HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9423{
9424 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9425
9426#ifndef IN_NEM_DARWIN
9427 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9428
9429 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_LEN
9430 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9431 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9432 | HMVMX_READ_IDT_VECTORING_INFO
9433 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9434 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9435
9436 /*
9437 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9438 */
9439 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9440 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9441 {
9442 /*
9443 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
9444 * instruction emulation to inject the original event. Otherwise, injecting the original event
9445 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
9446 */
9447 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9448 { /* likely */ }
9449 else
9450 {
9451 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9452# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9453 /** @todo NSTVMX: Think about how this should be handled. */
9454 if (pVmxTransient->fIsNestedGuest)
9455 return VERR_VMX_IPE_3;
9456# endif
9457 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9458 }
9459 }
9460 else
9461 {
9462 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9463 return rcStrict;
9464 }
9465
9466 /*
9467 * Get sufficient state and update the exit history entry.
9468 */
9469 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9470 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9471 AssertRCReturn(rc, rc);
9472
9473 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9474 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
9475 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
9476 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
9477 if (!pExitRec)
9478 {
9479 /*
9480 * If we succeed, resume guest execution.
9481 * If we fail in interpreting the instruction because we couldn't get the guest physical address
9482 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
9483 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
9484 * weird case. See @bugref{6043}.
9485 */
9486 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9487/** @todo bird: We can probably just go straight to IOM here and assume that
9488 * it's MMIO, then fall back on PGM if that hunch didn't work out so
9489 * well. However, we need to address the aliasing workarounds that
9490 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
9491 *
9492 * Might also be interesting to see if we can get this done more or
9493 * less locklessly inside IOM. Need to consider the lookup table
9494 * updating and use a bit more carefully first (or do all updates via
9495 * rendezvous) */
9496 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, &pVCpu->cpum.GstCtx, GCPhys, UINT32_MAX);
9497 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
9498 if ( rcStrict == VINF_SUCCESS
9499 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9500 || rcStrict == VERR_PAGE_NOT_PRESENT)
9501 {
9502 /* Successfully handled MMIO operation. */
9503 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9504 | HM_CHANGED_GUEST_APIC_TPR);
9505 rcStrict = VINF_SUCCESS;
9506 }
9507 }
9508 else
9509 {
9510 /*
9511 * Frequent exit or something needing probing. Call EMHistoryExec.
9512 */
9513 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
9514 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
9515
9516 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9517 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9518
9519 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9520 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9521 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9522 }
9523 return rcStrict;
9524#else
9525 AssertFailed();
9526 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
9527#endif
9528}
9529
9530
9531/**
9532 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
9533 * VM-exit.
9534 */
9535HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9536{
9537 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9538#ifndef IN_NEM_DARWIN
9539 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9540
9541 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9542 | HMVMX_READ_EXIT_INSTR_LEN
9543 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9544 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9545 | HMVMX_READ_IDT_VECTORING_INFO
9546 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9547 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9548
9549 /*
9550 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9551 */
9552 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9553 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9554 {
9555 /*
9556 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
9557 * we shall resolve the nested #PF and re-inject the original event.
9558 */
9559 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9560 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
9561 }
9562 else
9563 {
9564 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9565 return rcStrict;
9566 }
9567
9568 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9569 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9570 AssertRCReturn(rc, rc);
9571
9572 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9573 uint64_t const uExitQual = pVmxTransient->uExitQual;
9574 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
9575
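    /* Synthesize a #PF-style error code from the exit qualification: instruction fetches set ID,
       write accesses set RW, and P is set when the EPT entry granted any read/write/execute
       permission (i.e. the translation was present). */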
9576 RTGCUINT uErrorCode = 0;
9577 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
9578 uErrorCode |= X86_TRAP_PF_ID;
9579 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9580 uErrorCode |= X86_TRAP_PF_RW;
9581 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
9582 uErrorCode |= X86_TRAP_PF_P;
9583
9584 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9585 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
9586
9587 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9588
9589 /*
9590 * Handle the page-fault trap for the nested page tables.
9591 */
9592 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
9593 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, pCtx, GCPhys);
9594 TRPMResetTrap(pVCpu);
9595
9596 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
9597 if ( rcStrict == VINF_SUCCESS
9598 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9599 || rcStrict == VERR_PAGE_NOT_PRESENT)
9600 {
9601 /* Successfully synced our nested page tables. */
9602 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
9603 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
9604 return VINF_SUCCESS;
9605 }
9606 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9607 return rcStrict;
9608
9609#else /* IN_NEM_DARWIN */
9610 PVM pVM = pVCpu->CTX_SUFF(pVM);
9611 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
9612 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9613 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9614 vmxHCImportGuestRip(pVCpu);
9615 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
9616
9617 /*
9618 * Ask PGM for information about the given GCPhys. We need to check if we're
9619 * out of sync first.
9620 */
9621 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE),
9622 false,
9623 false };
9624 PGMPHYSNEMPAGEINFO Info;
9625 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
9626 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
9627 if (RT_SUCCESS(rc))
9628 {
9629 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9630 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
9631 {
9632 if (State.fCanResume)
9633 {
9634 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
9635 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9636 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9637 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9638 State.fDidSomething ? "" : " no-change"));
9639 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
9640 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9641 return VINF_SUCCESS;
9642 }
9643 }
9644
9645 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
9646 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9647 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9648 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9649 State.fDidSomething ? "" : " no-change"));
9650 }
9651 else
9652 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
9653 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9654 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
9655
9656 /*
9657 * Emulate the memory access, either access handler or special memory.
9658 */
9659 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
9660 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9661 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
9662 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
9663 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9664
9665 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9666 AssertRCReturn(rc, rc);
9667
9668 VBOXSTRICTRC rcStrict;
9669 if (!pExitRec)
9670 rcStrict = IEMExecOne(pVCpu);
9671 else
9672 {
9673 /* Frequent access or probing. */
9674 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9675 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9676 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9677 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9678 }
9679
9680 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9681
9682 Log4Func(("EPT return rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9683 return rcStrict;
9684#endif /* IN_NEM_DARWIN */
9685}
9686
9687#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9688
9689/**
9690 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
9691 */
9692HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9693{
9694 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9695
9696 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9697 | HMVMX_READ_EXIT_INSTR_INFO
9698 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9699 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9700 | CPUMCTX_EXTRN_SREG_MASK
9701 | CPUMCTX_EXTRN_HWVIRT
9702 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9703 AssertRCReturn(rc, rc);
9704
9705 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9706
9707 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9708 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9709
9710 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
9711 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9712 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9713 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9714 {
9715 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9716 rcStrict = VINF_SUCCESS;
9717 }
9718 return rcStrict;
9719}
9720
9721
9722/**
9723 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9724 */
9725HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9726{
9727 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9728
9729 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH;
9730 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9731 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9732 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9733 AssertRCReturn(rc, rc);
9734
9735 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9736
9737 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9738 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9739 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9740 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9741 {
9742 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9743 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9744 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9745 }
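    /* Note: VINF_VMX_VMLAUNCH_VMRESUME informs the caller that the guest is now in VMX non-root
       operation, so the outer run loop can switch to executing the nested guest. */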
9746 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9747 return rcStrict;
9748}
9749
9750
9751/**
9752 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9753 */
9754HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9755{
9756 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9757
9758 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9759 | HMVMX_READ_EXIT_INSTR_INFO
9760 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9761 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9762 | CPUMCTX_EXTRN_SREG_MASK
9763 | CPUMCTX_EXTRN_HWVIRT
9764 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9765 AssertRCReturn(rc, rc);
9766
9767 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9768
9769 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9770 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9771
9772 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9773 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9774 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9775 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9776 {
9777 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9778 rcStrict = VINF_SUCCESS;
9779 }
9780 return rcStrict;
9781}
9782
9783
9784/**
9785 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9786 */
9787HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9788{
9789 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9790
9791 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9792 | HMVMX_READ_EXIT_INSTR_INFO
9793 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9794 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9795 | CPUMCTX_EXTRN_SREG_MASK
9796 | CPUMCTX_EXTRN_HWVIRT
9797 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9798 AssertRCReturn(rc, rc);
9799
9800 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9801
9802 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9803 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9804
9805 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9806 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9807 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9808 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9809 {
9810 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9811 rcStrict = VINF_SUCCESS;
9812 }
9813 return rcStrict;
9814}
9815
9816
9817/**
9818 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9819 */
9820HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9821{
9822 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9823
9824 /*
9825 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9826 * thus might not need to import the shadow VMCS state, but it's safer to do so in case
9827 * code elsewhere dares to look at unsynced VMCS fields.
9828 */
9829 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9830 | HMVMX_READ_EXIT_INSTR_INFO
9831 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9832 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9833 | CPUMCTX_EXTRN_SREG_MASK
9834 | CPUMCTX_EXTRN_HWVIRT
9835 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9836 AssertRCReturn(rc, rc);
9837
9838 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9839
9840 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
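    /* For VMREAD the memory operand (when present) is the destination, which is why it is decoded
       as a write access here; VMWRITE below decodes its memory operand as a read, it being the source. */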
9841 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9842 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9843
9844 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9845 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9846 {
9847 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9848
9849# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
9850 /* Try for exit optimization. This is on the following instruction
9851 because it would be a waste of time to have to reinterpret the
9852 already decoded vmread instruction. */
9853 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
9854 if (pExitRec)
9855 {
9856 /* Frequent access or probing. */
9857 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9858 AssertRCReturn(rc, rc);
9859
9860 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9861 Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9862 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9863 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9864 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9865 }
9866# endif
9867 }
9868 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9869 {
9870 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9871 rcStrict = VINF_SUCCESS;
9872 }
9873 return rcStrict;
9874}
9875
9876
9877/**
9878 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9879 */
9880HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9881{
9882 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9883
9884 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME;
9885 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9886 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9887 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9888 AssertRCReturn(rc, rc);
9889
9890 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9891
9892 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9893 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9894 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9895 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9896 {
9897 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9898 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9899 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9900 }
9901 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9902 return rcStrict;
9903}
9904
9905
9906/**
9907 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9908 */
9909HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9910{
9911 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9912
9913 /*
9914 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, since our HM hook
9915 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and it
9916 * flags re-loading the entire shadow VMCS, we should save the entire shadow VMCS here.
9917 */
9918 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9919 | HMVMX_READ_EXIT_INSTR_INFO
9920 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9921 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9922 | CPUMCTX_EXTRN_SREG_MASK
9923 | CPUMCTX_EXTRN_HWVIRT
9924 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9925 AssertRCReturn(rc, rc);
9926
9927 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9928
9929 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9930 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9931 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9932
9933 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9934 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9935 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9936 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9937 {
9938 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9939 rcStrict = VINF_SUCCESS;
9940 }
9941 return rcStrict;
9942}
9943
9944
9945/**
9946 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9947 */
9948HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9949{
9950 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9951
9952 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9953 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
9954 | CPUMCTX_EXTRN_HWVIRT
9955 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9956 AssertRCReturn(rc, rc);
9957
9958 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9959
9960 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9961 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9962 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9963 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9964 {
9965 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9966 rcStrict = VINF_SUCCESS;
9967 }
9968 return rcStrict;
9969}
9970
9971
9972/**
9973 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9974 */
9975HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9976{
9977 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9978
9979 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9980 | HMVMX_READ_EXIT_INSTR_INFO
9981 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9982 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9983 | CPUMCTX_EXTRN_SREG_MASK
9984 | CPUMCTX_EXTRN_HWVIRT
9985 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9986 AssertRCReturn(rc, rc);
9987
9988 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9989
9990 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9991 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9992
9993 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9994 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9995 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9996 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9997 {
9998 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9999 rcStrict = VINF_SUCCESS;
10000 }
10001 return rcStrict;
10002}
10003
10004
10005/**
10006 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
10007 */
10008HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10009{
10010 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10011
10012 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10013 | HMVMX_READ_EXIT_INSTR_INFO
10014 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10015 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
10016 | CPUMCTX_EXTRN_SREG_MASK
10017 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10018 AssertRCReturn(rc, rc);
10019
10020 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
10021
10022 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10023 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
10024
10025 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
10026 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10027 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
10028 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10029 {
10030 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10031 rcStrict = VINF_SUCCESS;
10032 }
10033 return rcStrict;
10034}
10035
10036
10037# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10038/**
10039 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
10040 */
10041HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10042{
10043 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10044
10045 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10046 | HMVMX_READ_EXIT_INSTR_INFO
10047 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10048 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
10049 | CPUMCTX_EXTRN_SREG_MASK
10050 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10051 AssertRCReturn(rc, rc);
10052
10053 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
10054
10055 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10056 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
10057
10058 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
10059 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10060 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
10061 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10062 {
10063 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10064 rcStrict = VINF_SUCCESS;
10065 }
10066 return rcStrict;
10067}
10068# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10069#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10070/** @} */
10071
10072
10073#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10074/** @name Nested-guest VM-exit handlers.
10075 * @{
10076 */
10077/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10078/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10079/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10080
10081/**
10082 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
10083 * Conditional VM-exit.
10084 */
10085HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10086{
10087 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10088
10089 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
10090
10091 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
10092 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
10093 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
10094
10095 switch (uExitIntType)
10096 {
10097# ifndef IN_NEM_DARWIN
10098 /*
10099 * Physical NMIs:
10100 * We shouldn't direct host physical NMIs to the nested-guest. Dispatch them to the host.

10101 */
10102 case VMX_EXIT_INT_INFO_TYPE_NMI:
10103 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
10104# endif
10105
10106 /*
10107 * Hardware exceptions,
10108 * Software exceptions,
10109 * Privileged software exceptions:
10110 * Figure out if the exception must be delivered to the guest or the nested-guest.
10111 */
10112 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
10113 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
10114 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
10115 {
10116 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10117 | HMVMX_READ_EXIT_INSTR_LEN
10118 | HMVMX_READ_IDT_VECTORING_INFO
10119 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10120
10121 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10122 if (CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo), pVmxTransient->uExitIntErrorCode))
10123 {
10124 /* Exit qualification is required for debug and page-fault exceptions. */
10125 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10126
10127 /*
10128 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
10129 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
10130 * length. However, if delivery of a software interrupt, software exception or privileged
10131 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
10132 */
10133 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10134 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT(pVmxTransient->uExitIntInfo,
10135 pVmxTransient->uExitIntErrorCode,
10136 pVmxTransient->uIdtVectoringInfo,
10137 pVmxTransient->uIdtVectoringErrorCode);
10138#ifdef DEBUG_ramshankar
10139 vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10140 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n",
10141 pVmxTransient->uExitIntInfo, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
10142 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
10143 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n",
10144 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
10145#endif
10146 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
10147 }
10148
10149 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
10150 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10151 return vmxHCExitXcpt(pVCpu, pVmxTransient);
10152 }
10153
10154 /*
10155 * Software interrupts:
10156 * VM-exits cannot be caused by software interrupts.
10157 *
10158 * External interrupts:
10159 * This should only happen when "acknowledge external interrupts on VM-exit"
10160 * control is set. However, we never set this when executing a guest or
10161 * nested-guest. For nested-guests it is emulated while injecting interrupts into
10162 * the guest.
10163 */
10164 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10165 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
10166 default:
10167 {
10168 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
10169 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
10170 }
10171 }
10172}
10173
10174
10175/**
10176 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
10177 * Unconditional VM-exit.
10178 */
10179HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10180{
10181 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10182 return IEMExecVmxVmexitTripleFault(pVCpu);
10183}
10184
10185
10186/**
10187 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
10188 */
10189HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10190{
10191 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10192
10193 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
10194 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10195 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
10196}
10197
10198
10199/**
10200 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
10201 */
10202HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10203{
10204 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10205
10206 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
10207 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10208 return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
10209}
10210
10211
10212/**
10213 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
10214 * Unconditional VM-exit.
10215 */
10216HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10217{
10218 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10219
10220 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10221 | HMVMX_READ_EXIT_INSTR_LEN
10222 | HMVMX_READ_IDT_VECTORING_INFO
10223 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10224
10225 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10226 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10227 pVmxTransient->uIdtVectoringErrorCode);
10228 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
10229}
10230
10231
10232/**
10233 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10234 */
10235HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10236{
10237 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10238
10239 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
10240 {
10241 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10242 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10243 }
10244 return vmxHCExitHlt(pVCpu, pVmxTransient);
10245}
10246
10247
10248/**
10249 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10250 */
10251HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10252{
10253 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10254
10255 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10256 {
10257 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10258 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10259 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10260 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10261 }
10262 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
10263}
10264
10265
10266/**
10267 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10268 */
10269HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10270{
10271 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10272
10273 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
10274 {
10275 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10276 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10277 }
10278 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
10279}
10280
10281
10282/**
10283 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
10284 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
10285 */
10286HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10287{
10288 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10289
10290 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
10291 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
10292
10293 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10294
10295 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
10296 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10297 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10298
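    /* Only the lower 32 bits of the register supply the VMCS field encoding when the
       nested-guest is not in long mode (32-bit operand size). */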
10299 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
10300 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
10301 u64VmcsField &= UINT64_C(0xffffffff);
10302
10303 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
10304 {
10305 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10306 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10307 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10308 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10309 }
10310
10311 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
10312 return vmxHCExitVmread(pVCpu, pVmxTransient);
10313 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
10314}
10315
10316
10317/**
10318 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10319 */
10320HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10321{
10322 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10323
10324 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10325 {
10326 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10327 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10328 }
10329
10330 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
10331}
10332
10333
10334/**
10335 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
10336 * Conditional VM-exit.
10337 */
10338HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10339{
10340 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10341
10342 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10343 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10344
10345 VBOXSTRICTRC rcStrict;
10346 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
10347 switch (uAccessType)
10348 {
10349 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
10350 {
10351 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10352 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10353 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10354 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10355
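            /* Determine whether the nested-guest intercepts this MOV-to-CRx: CR0/CR4 depend on
               the guest/host mask and read shadow, CR3 on the "CR3-load exiting" control and the
               CR3-target values, CR8 on the "CR8-load exiting" control. */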
10356 bool fIntercept;
10357 switch (iCrReg)
10358 {
10359 case 0:
10360 case 4:
10361 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
10362 break;
10363
10364 case 3:
10365 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
10366 break;
10367
10368 case 8:
10369 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
10370 break;
10371
10372 default:
10373 fIntercept = false;
10374 break;
10375 }
10376 if (fIntercept)
10377 {
10378 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10379 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10380 }
10381 else
10382 {
10383 int const rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10384 AssertRCReturn(rc, rc);
10385 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10386 }
10387 break;
10388 }
10389
10390 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
10391 {
10392 /*
10393 * CR0/CR4 reads do not cause VM-exits; the read-shadow is used (subject to masking).
10394 * CR2 reads do not cause a VM-exit.
10395 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
10396 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
10397 */
10398 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10399 if ( iCrReg == 3
10400 || iCrReg == 8)
10401 {
10402 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
10403 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
10404 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
10405 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
10406 {
10407 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10408 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10409 }
10410 else
10411 {
10412 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10413 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10414 }
10415 }
10416 else
10417 {
10418 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
10419 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
10420 }
10421 break;
10422 }
10423
10424 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
10425 {
10426 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
10427 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
10428 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
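            /* Per the SDM, CLTS causes a (nested) VM-exit only when CR0.TS is set in both the
               CR0 guest/host mask and the CR0 read shadow. */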
10429 if ( (uGstHostMask & X86_CR0_TS)
10430 && (uReadShadow & X86_CR0_TS))
10431 {
10432 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10433 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10434 }
10435 else
10436 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
10437 break;
10438 }
10439
10440 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10441 {
10442 RTGCPTR GCPtrEffDst;
10443 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
10444 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
10445 if (fMemOperand)
10446 {
10447 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10448 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
10449 }
10450 else
10451 GCPtrEffDst = NIL_RTGCPTR;
10452
10453 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
10454 {
10455 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10456 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
10457 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10458 }
10459 else
10460 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
10461 break;
10462 }
10463
10464 default:
10465 {
10466 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
10467 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
10468 }
10469 }
10470
10471 if (rcStrict == VINF_IEM_RAISED_XCPT)
10472 {
10473 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10474 rcStrict = VINF_SUCCESS;
10475 }
10476 return rcStrict;
10477}
10478
10479
10480/**
10481 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
10482 * Conditional VM-exit.
10483 */
10484HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10485{
10486 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10487
10488 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
10489 {
10490 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10491 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10492 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10493 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10494 }
10495 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
10496}
10497
10498
10499/**
10500 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
10501 * Conditional VM-exit.
10502 */
10503HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10504{
10505 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10506
10507 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10508
10509 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
10510 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
10511 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
10512
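    /* Exit qualification size encoding: 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes; encoding 2 is
       invalid and was rejected by the assertion above. */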
10513 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
10514 uint8_t const cbAccess = s_aIOSizes[uIOSize];
10515 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
10516 {
10517 /*
10518 * IN/OUT instruction:
10519 * - Provides VM-exit instruction length.
10520 *
10521 * INS/OUTS instruction:
10522 * - Provides VM-exit instruction length.
10523 * - Provides Guest-linear address.
10524 * - Optionally provides VM-exit instruction info (depends on CPU feature).
10525 */
10526 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10527 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10528
10529 /* Make sure we don't use stale/uninitialized VMX-transient info below. */
10530 pVmxTransient->ExitInstrInfo.u = 0;
10531 pVmxTransient->uGuestLinearAddr = 0;
10532
10533 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
10534 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
10535 if (fIOString)
10536 {
10537 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10538 if (fVmxInsOutsInfo)
10539 {
10540 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
10541 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10542 }
10543 }
10544
10545 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
10546 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10547 }
10548 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
10549}
10550
10551
10552/**
10553 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10554 */
10555HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10556{
10557 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10558
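    /* When the nested-guest does not use MSR bitmaps, every RDMSR causes a VM-exit;
       otherwise consult the nested-guest's MSR bitmap for the MSR in ECX. */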
10559 uint32_t fMsrpm;
10560 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10561 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10562 else
10563 fMsrpm = VMXMSRPM_EXIT_RD;
10564
10565 if (fMsrpm & VMXMSRPM_EXIT_RD)
10566 {
10567 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10568 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10569 }
10570 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
10571}
10572
10573
10574/**
10575 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10576 */
10577HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10578{
10579 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10580
10581 uint32_t fMsrpm;
10582 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10583 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10584 else
10585 fMsrpm = VMXMSRPM_EXIT_WR;
10586
10587 if (fMsrpm & VMXMSRPM_EXIT_WR)
10588 {
10589 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10590 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10591 }
10592 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
10593}
10594
10595
10596/**
10597 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10598 */
10599HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10600{
10601 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10602
10603 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
10604 {
10605 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10606 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10607 }
10608 return vmxHCExitMwait(pVCpu, pVmxTransient);
10609}
10610
10611
10612/**
10613 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
10614 * VM-exit.
10615 */
10616HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10617{
10618 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10619
10620 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
10621 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10622 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10623 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10624}
10625
10626
10627/**
10628 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10629 */
10630HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10631{
10632 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10633
10634 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
10635 {
10636 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10637 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10638 }
10639 return vmxHCExitMonitor(pVCpu, pVmxTransient);
10640}
10641
10642
10643/**
10644 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10645 */
10646HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10647{
10648 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10649
10650 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10651 * PAUSE when executing a nested-guest? If it does not, we would not need
10652 * to check for the intercepts here. Just call VM-exit... */
10653
10654 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10655 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10656 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10657 {
10658 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10659 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10660 }
10661 return vmxHCExitPause(pVCpu, pVmxTransient);
10662}
10663
10664
10665/**
10666 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10667 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10668 */
10669HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10670{
10671 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10672
10673 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10674 {
10675 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10676 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10677 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10678 }
10679 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10680}
10681
10682
10683/**
10684 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10685 * VM-exit.
10686 */
10687HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10688{
10689 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10690
10691 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10692 | HMVMX_READ_EXIT_INSTR_LEN
10693 | HMVMX_READ_IDT_VECTORING_INFO
10694 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10695
10696 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10697
10698 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10699 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10700
10701 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10702 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10703 pVmxTransient->uIdtVectoringErrorCode);
10704 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10705}
10706
10707
10708/**
10709 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10710 * Conditional VM-exit.
10711 */
10712HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10713{
10714 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10715
10716 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10717 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10718 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10719}
10720
10721
10722/**
10723 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10724 * Conditional VM-exit.
10725 */
10726HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10727{
10728 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10729
10730 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10731 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10732 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10733}
10734
10735
10736/**
10737 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10738 */
10739HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10740{
10741 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10742
10743 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10744 {
10745 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10746 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10747 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10748 }
10749 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10750}
10751
10752
10753/**
10754 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10755 */
10756HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10757{
10758 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10759
10760 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10761 {
10762 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10763 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10764 }
10765 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10766}
10767
10768
10769/**
10770 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10771 */
10772HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10773{
10774 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10775
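    /* INVPCID is intercepted via the "INVLPG exiting" control (together with the
       "enable INVPCID" secondary control asserted below). */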
10776 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10777 {
10778 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10779 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10780 | HMVMX_READ_EXIT_INSTR_INFO
10781 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10782 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10783 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10784 }
10785 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10786}
10787
10788
10789/**
10790 * Nested-guest VM-exit handler for invalid-guest state
10791 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10792 */
10793HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10794{
10795 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10796
10797 /*
10798 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10799 * So if it does happen, it possibly indicates a bug in the hardware-assisted VMX code.
10800 * Handle it as if the outer guest were in an invalid guest state.
10801 *
10802 * When the fast path is implemented, this should be changed to cause the corresponding
10803 * nested-guest VM-exit.
10804 */
10805 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10806}
10807
10808
10809/**
10810 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10811 * and only provide the instruction length.
10812 *
10813 * Unconditional VM-exit.
10814 */
10815HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10816{
10817 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10818
10819#ifdef VBOX_STRICT
10820 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10821 switch (pVmxTransient->uExitReason)
10822 {
10823 case VMX_EXIT_ENCLS:
10824 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10825 break;
10826
10827 case VMX_EXIT_VMFUNC:
10828 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10829 break;
10830 }
10831#endif
10832
10833 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10834 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10835}
10836
10837
10838/**
10839 * Nested-guest VM-exit handler for instructions that provide instruction length as
10840 * well as more information.
10841 *
10842 * Unconditional VM-exit.
10843 */
10844HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10845{
10846 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10847
10848# ifdef VBOX_STRICT
10849 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10850 switch (pVmxTransient->uExitReason)
10851 {
10852 case VMX_EXIT_GDTR_IDTR_ACCESS:
10853 case VMX_EXIT_LDTR_TR_ACCESS:
10854 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10855 break;
10856
10857 case VMX_EXIT_RDRAND:
10858 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10859 break;
10860
10861 case VMX_EXIT_RDSEED:
10862 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10863 break;
10864
10865 case VMX_EXIT_XSAVES:
10866 case VMX_EXIT_XRSTORS:
10867 /** @todo NSTVMX: Verify XSS-bitmap. */
10868 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10869 break;
10870
10871 case VMX_EXIT_UMWAIT:
10872 case VMX_EXIT_TPAUSE:
10873 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10874 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10875 break;
10876
10877 case VMX_EXIT_LOADIWKEY:
10878 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10879 break;
10880 }
10881# endif
10882
10883 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10884 | HMVMX_READ_EXIT_INSTR_LEN
10885 | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10886 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10887 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10888}
10889
10890# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10891
10892/**
10893 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10894 * Conditional VM-exit.
10895 */
10896HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10897{
10898 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10899 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10900
10901 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10902 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10903 {
10904 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10905 | HMVMX_READ_EXIT_INSTR_LEN
10906 | HMVMX_READ_EXIT_INTERRUPTION_INFO
10907 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10908 | HMVMX_READ_IDT_VECTORING_INFO
10909 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
10910 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10911 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10912 AssertRCReturn(rc, rc);
10913
10914 /*
10915 * If it's our VM-exit, we're responsible for re-injecting any event whose delivery
10916 * might have triggered this VM-exit. If we forward the problem to the inner VMM,
10917 * it is the inner VMM's problem to deal with and we'll clear the recovered event.
10918 */
10919 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10920 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10921 { /*likely*/ }
10922 else
10923 {
10924 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
10925 return rcStrict;
10926 }
10927 uint32_t const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* paranoia. should not inject events below. */
10928
10929 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10930 uint64_t const uExitQual = pVmxTransient->uExitQual;
10931
10932 RTGCPTR GCPtrNestedFault;
10933 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10934 if (fIsLinearAddrValid)
10935 {
10936 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10937 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
10938 }
10939 else
10940 GCPtrNestedFault = 0;
10941
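        /* Fold the EPT-violation exit qualification into x86 #PF style error-code bits
           for the PGM nested-paging handler. */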
10942 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10943 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10944 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10945 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10946 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10947
10948 PGMPTWALK Walk;
10949 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10950 rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, pCtx, GCPhysNestedFault,
10951 fIsLinearAddrValid, GCPtrNestedFault, &Walk);
10952 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
10953 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
10954 if (RT_SUCCESS(rcStrict))
10955 {
10956 if (rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE)
10957 {
10958 Assert(!fClearEventOnForward);
10959 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM));
10960 rcStrict = VINF_EM_RESCHEDULE_REM;
10961 }
10962 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
10963 return rcStrict;
10964 }
10965
10966 if (fClearEventOnForward)
10967 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
10968
10969 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10970 pVmxTransient->uIdtVectoringErrorCode);
10971 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10972 {
10973 VMXVEXITINFO const ExitInfo
10974 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
10975 pVmxTransient->uExitQual,
10976 pVmxTransient->cbExitInstr,
10977 pVmxTransient->uGuestLinearAddr,
10978 pVmxTransient->uGuestPhysicalAddr);
10979 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10980 }
10981
10982 AssertMsgReturn(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG,
10983 ("uErr=%#RX32 uExitQual=%#RX64 GCPhysNestedFault=%#RGp GCPtrNestedFault=%#RGv\n",
10984 (uint32_t)uErr, uExitQual, GCPhysNestedFault, GCPtrNestedFault),
10985 rcStrict);
10986 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10987 }
10988
10989 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10990}
10991
10992
10993/**
10994 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10995 * Conditional VM-exit.
10996 */
10997HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10998{
10999 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11000 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
11001
11002 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11003 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
11004 {
11005 vmxHCReadToTransient<HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
11006 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
11007 AssertRCReturn(rc, rc);
11008
11009 PGMPTWALK Walk;
11010 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11011 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
11012 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, pCtx,
11013 GCPhysNestedFault, false /* fIsLinearAddrValid */,
11014 0 /* GCPtrNestedFault */, &Walk);
11015 if (RT_SUCCESS(rcStrict))
11016 {
11017 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
11018 return rcStrict;
11019 }
11020
11021 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
11022 vmxHCReadToTransient< HMVMX_READ_IDT_VECTORING_INFO
11023 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
11024
11025 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
11026 pVmxTransient->uIdtVectoringErrorCode);
11027 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
11028 }
11029
11030 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
11031}
11032
11033# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
11034
11035/** @} */
11036#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
11037
11038
11039/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
11040 * probes.
11041 *
11042 * The following few functions and associated structure contain the bloat
11043 * necessary for providing detailed debug events and dtrace probes as well as
11044 * reliable host-side single stepping. This works on the principle of
11045 * "subclassing" the normal execution loop and workers. We replace the loop
11046 * method completely and override selected helpers to add necessary adjustments
11047 * to their core operation.
11048 *
11049 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
11050 * any performance for debug and analysis features.
11051 *
11052 * @{
11053 */
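
/*
 * Illustrative sketch only (not compiled): roughly how the helpers below are meant to be
 * strung together by the debug run loop; the real loop adds pre-run checks, TSC offsetting,
 * force-flag processing, statistics and more.
 *
 *      VMXRUNDBGSTATE DbgState;
 *      vmxHCRunDebugStateInit(pVCpu, pVmxTransient, &DbgState);
 *      vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState);
 *      for (;;)
 *      {
 *          vmxHCPreRunGuestDebugStateApply(pVCpu, pVmxTransient, &DbgState);   <- right before VM-entry
 *          ...execute the guest, handle the VM-exit (consulting DbgState.bmExitsToCheck),
 *             and break out on errors or once stepping moved past uRipStart/uCsStart...
 *          if (DbgState.uDtraceSettingsSeqNo != VBOXVMM_GET_SETTINGS_SEQ_NO())
 *              vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState);
 *      }
 *      rcStrict = vmxHCRunDebugStateRevert(pVCpu, pVmxTransient, &DbgState, rcStrict);
 */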
11054
11055/**
11056 * Transient per-VCPU debug state of the VMCS and related info that we save/restore
11057 * in the debug run loop.
11058 */
11059typedef struct VMXRUNDBGSTATE
11060{
11061 /** The RIP we started executing at. This is for detecting that we stepped. */
11062 uint64_t uRipStart;
11063 /** The CS we started executing with. */
11064 uint16_t uCsStart;
11065
11066 /** Whether we've actually modified the 1st execution control field. */
11067 bool fModifiedProcCtls : 1;
11068 /** Whether we've actually modified the 2nd execution control field. */
11069 bool fModifiedProcCtls2 : 1;
11070 /** Whether we've actually modified the exception bitmap. */
11071 bool fModifiedXcptBitmap : 1;
11072
11073 /** We desire the CR0 mask to be cleared. */
11074 bool fClearCr0Mask : 1;
11075 /** We desire the CR4 mask to be cleared. */
11076 bool fClearCr4Mask : 1;
11077 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
11078 uint32_t fCpe1Extra;
11079 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
11080 uint32_t fCpe1Unwanted;
11081 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
11082 uint32_t fCpe2Extra;
11083 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
11084 uint32_t bmXcptExtra;
11085 /** The sequence number of the Dtrace provider settings the state was
11086 * configured against. */
11087 uint32_t uDtraceSettingsSeqNo;
11088 /** VM-exits to check (one bit per VM-exit). */
11089 uint32_t bmExitsToCheck[3];
11090
11091 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
11092 uint32_t fProcCtlsInitial;
11093 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
11094 uint32_t fProcCtls2Initial;
11095 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
11096 uint32_t bmXcptInitial;
11097} VMXRUNDBGSTATE;
11098AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
11099typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
11100
11101
11102/**
11103 * Initializes the VMXRUNDBGSTATE structure.
11104 *
11105 * @param pVCpu The cross context virtual CPU structure of the
11106 * calling EMT.
11107 * @param pVmxTransient The VMX-transient structure.
11108 * @param pDbgState The debug state to initialize.
11109 */
11110static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11111{
11112 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
11113 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
11114
11115 pDbgState->fModifiedProcCtls = false;
11116 pDbgState->fModifiedProcCtls2 = false;
11117 pDbgState->fModifiedXcptBitmap = false;
11118 pDbgState->fClearCr0Mask = false;
11119 pDbgState->fClearCr4Mask = false;
11120 pDbgState->fCpe1Extra = 0;
11121 pDbgState->fCpe1Unwanted = 0;
11122 pDbgState->fCpe2Extra = 0;
11123 pDbgState->bmXcptExtra = 0;
11124 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
11125 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
11126 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
11127}
11128
11129
11130/**
11131 * Updates the VMCS fields with changes requested by @a pDbgState.
11132 *
11133 * This is performed after vmxHCPreRunGuestDebugStateUpdate as well
11134 * immediately before executing guest code, i.e. when interrupts are disabled.
11135 * We don't check status codes here as we cannot easily assert or return in the
11136 * latter case.
11137 *
11138 * @param pVCpu The cross context virtual CPU structure.
11139 * @param pVmxTransient The VMX-transient structure.
11140 * @param pDbgState The debug state.
11141 */
11142static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11143{
11144 /*
11145 * Ensure desired flags in VMCS control fields are set.
11146 * (Ignoring write failure here, as we're committed and it's just debug extras.)
11147 *
11148 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
11149 * there should be no stale data in pCtx at this point.
11150 */
11151 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11152 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
11153 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
11154 {
11155 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
11156 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
11157 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
11158 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
11159 pDbgState->fModifiedProcCtls = true;
11160 }
11161
11162 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
11163 {
11164 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
11165 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
11166 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
11167 pDbgState->fModifiedProcCtls2 = true;
11168 }
11169
11170 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
11171 {
11172 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
11173 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
11174 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
11175 pDbgState->fModifiedXcptBitmap = true;
11176 }
11177
11178 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
11179 {
11180 pVmcsInfo->u64Cr0Mask = 0;
11181 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
11182 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
11183 }
11184
11185 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
11186 {
11187 pVmcsInfo->u64Cr4Mask = 0;
11188 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
11189 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
11190 }
11191
11192 NOREF(pVCpu);
11193}
11194
11195
11196/**
11197 * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
11198 * re-entry next time around.
11199 *
11200 * @returns Strict VBox status code (i.e. informational status codes too).
11201 * @param pVCpu The cross context virtual CPU structure.
11202 * @param pVmxTransient The VMX-transient structure.
11203 * @param pDbgState The debug state.
11204 * @param rcStrict The return code from executing the guest using single
11205 * stepping.
11206 */
11207static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
11208 VBOXSTRICTRC rcStrict)
11209{
11210 /*
11211 * Restore VM-exit control settings as we may not reenter this function the
11212 * next time around.
11213 */
11214 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11215
11216 /* We reload the initial value and trigger what recalculations we can the
11217 next time around. From the looks of things, that's all that's required atm. */
11218 if (pDbgState->fModifiedProcCtls)
11219 {
11220 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
11221 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
11222 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
11223 AssertRC(rc2);
11224 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
11225 }
11226
11227 /* We're currently the only ones messing with this one, so just restore the
11228 cached value and reload the field. */
11229 if ( pDbgState->fModifiedProcCtls2
11230 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
11231 {
11232 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
11233 AssertRC(rc2);
11234 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
11235 }
11236
11237 /* If we've modified the exception bitmap, we restore it and trigger
11238 reloading and partial recalculation the next time around. */
11239 if (pDbgState->fModifiedXcptBitmap)
11240 {
11241 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pDbgState->bmXcptInitial);
11242 AssertRC(rc2);
11243 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
11244 }
11245
11246 return rcStrict;
11247}
11248
11249
11250/**
11251 * Configures VM-exit controls for current DBGF and DTrace settings.
11252 *
11253 * This updates @a pDbgState and the VMCS execution control fields to reflect
11254 * the necessary VM-exits demanded by DBGF and DTrace.
11255 *
11256 * @param pVCpu The cross context virtual CPU structure.
11257 * @param pVmxTransient The VMX-transient structure. May update
11258 * fUpdatedTscOffsettingAndPreemptTimer.
11259 * @param pDbgState The debug state.
11260 */
11261static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11262{
11263#ifndef IN_NEM_DARWIN
11264 /*
11265 * Record the DTrace settings sequence number so we can spot changes.
11266 */
11267 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
11268 ASMCompilerBarrier();
11269#endif
11270
11271 /*
11272 * We'll rebuild most of the middle block of data members (holding the
11273 * current settings) as we go along here, so start by clearing it all.
11274 */
11275 pDbgState->bmXcptExtra = 0;
11276 pDbgState->fCpe1Extra = 0;
11277 pDbgState->fCpe1Unwanted = 0;
11278 pDbgState->fCpe2Extra = 0;
11279 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
11280 pDbgState->bmExitsToCheck[i] = 0;
11281
11282 /*
11283 * Software interrupts (INT XXh) - no idea how to trigger these...
11284 */
11285 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11286 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
11287 || VBOXVMM_INT_SOFTWARE_ENABLED())
11288 {
11289 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11290 }
11291
11292 /*
11293 * INT3 breakpoints - triggered by #BP exceptions.
11294 */
11295 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
11296 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11297
11298 /*
11299 * Exception bitmap and XCPT events+probes.
11300 */
11301 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
11302 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
11303 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
11304
11305 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
11306 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
11307 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11308 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
11309 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
11310 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
11311 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
11312 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
11313 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
11314 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
11315 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
11316 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
11317 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
11318 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
11319 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
11320 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
11321 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
11322 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
11323
11324 if (pDbgState->bmXcptExtra)
11325 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11326
11327 /*
11328 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
11329 *
11330 * Note! This is the reverse of what vmxHCHandleExitDtraceEvents does.
11331 * So when adding, changing or removing entries here, don't forget to update it too.
11332 *
11333 * Some of the macros pick up local variables to save horizontal space
11334 * (being able to see the calls laid out as a table is the lesser evil here).
11335 */
11336#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
11337 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
11338 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
11339#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
11340 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11341 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11342 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11343 } else do { } while (0)
11344#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
11345 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11346 { \
11347 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
11348 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11349 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11350 } else do { } while (0)
11351#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
11352 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11353 { \
11354 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
11355 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11356 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11357 } else do { } while (0)
11358#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
11359 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11360 { \
11361 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
11362 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11363 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11364 } else do { } while (0)
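 /* For illustration, the SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT)
    invocation below expands to roughly the following (AssertCompile elided):
        if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INSTR_HALT)
            || VBOXVMM_INSTR_HALT_ENABLED())
        {
            pDbgState->fCpe1Extra |= VMX_PROC_CTLS_HLT_EXIT;
            ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_HLT);
        }
    i.e. it requests the corresponding VM-exit via the primary processor-based controls and
    marks the exit reason for checking in vmxHCHandleExitDtraceEvents. */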
11365
11366 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
11367 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
11368 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
11369 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
11370 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
11371
11372 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
11373 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
11374 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
11375 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
11376 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
11377 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
11378 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
11379 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
11380 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
11381 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
11382 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
11383 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
11384 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
11385 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
11386 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
11387 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
11388 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
11389 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
11390 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
11391 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
11392 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
11393 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
11394 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
11395 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
11396 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
11397 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
11398 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
11399 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
11400 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
11401 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
11402 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
11403 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
11404 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
11405 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
11406 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
11407 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
11408
11409 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
11410 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11411 {
11412 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo,
11413 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
11414 AssertRC(rc);
11415
11416#if 0 /** @todo fix me */
11417 pDbgState->fClearCr0Mask = true;
11418 pDbgState->fClearCr4Mask = true;
11419#endif
11420 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
11421 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
11422 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11423 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
11424 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
11425 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
11426 require clearing here and in the loop if we start using it. */
11427 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
11428 }
11429 else
11430 {
11431 if (pDbgState->fClearCr0Mask)
11432 {
11433 pDbgState->fClearCr0Mask = false;
11434 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
11435 }
11436 if (pDbgState->fClearCr4Mask)
11437 {
11438 pDbgState->fClearCr4Mask = false;
11439 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
11440 }
11441 }
11442 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
11443 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
11444
11445 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
11446 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
11447 {
11448 /** @todo later, need to fix handler as it assumes this won't usually happen. */
11449 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
11450 }
11451 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
11452 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
11453
11454 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
11455 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
11456 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
11457 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
11458 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
11459 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
11460 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
11461 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
11462#if 0 /** @todo too slow, fix handler. */
11463 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
11464#endif
11465 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
11466
11467 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
11468 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
11469 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
11470 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
11471 {
11472 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11473 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
11474 }
11475 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11476 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11477 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11478 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11479
11480 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
11481 || IS_EITHER_ENABLED(pVM, INSTR_STR)
11482 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
11483 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
11484 {
11485 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11486 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
11487 }
11488 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
11489 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
11490 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
11491 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
11492
11493 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
11494 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
11495 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
11496 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
11497 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
11498 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
11499 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
11500 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
11501 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
11502 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
11503 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
11504 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
11505 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
11506 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
11507 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
11508 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
11509 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
11510 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
11511 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
11512 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
11513 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
11514 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
11515
11516#undef IS_EITHER_ENABLED
11517#undef SET_ONLY_XBM_IF_EITHER_EN
11518#undef SET_CPE1_XBM_IF_EITHER_EN
11519#undef SET_CPEU_XBM_IF_EITHER_EN
11520#undef SET_CPE2_XBM_IF_EITHER_EN
11521
11522 /*
11523 * Sanitize the control stuff.
11524 */
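 /* Roughly speaking: fCpe2Extra and fCpe1Extra are intersected with the allowed-1 settings of
    the capability MSRs so we only ask for controls the CPU can actually provide, and any
    requested secondary control implies setting VMX_PROC_CTLS_USE_SECONDARY_CTLS in the primary
    controls. fCpe1Unwanted drops bits listed in allowed0 (presumably the must-be-one settings),
    since those cannot be cleared anyway. */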
11525 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
11526 if (pDbgState->fCpe2Extra)
11527 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
11528 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
11529 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
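 /* If the desired RDTSC-exiting setting changed, flip the cached flag and clear
    fUpdatedTscOffsettingAndPreemptTimer so the TSC offsetting / preemption-timer setup is
    presumably redone before the next guest run; ring-0 and the NEM/darwin backend keep the
    flag in different per-VCPU structures, hence the two variants below. */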
11530#ifndef IN_NEM_DARWIN
11531 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11532 {
11533 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
11534 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11535 }
11536#else
11537 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11538 {
11539 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
11540 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11541 }
11542#endif
11543
11544 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
11545 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
11546 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
11547 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
11548}
11549
11550
11551/**
11552 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
11553 * appropriate.
11554 *
11555 * The caller has checked the VM-exit against the
11556 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs
11557 * already, so we don't have to do that either.
11558 *
11559 * @returns Strict VBox status code (i.e. informational status codes too).
11560 * @param pVCpu The cross context virtual CPU structure.
11561 * @param pVmxTransient The VMX-transient structure.
11562 * @param uExitReason The VM-exit reason.
11563 *
11564 * @remarks The name of this function is displayed by dtrace, so keep it short
11565 * and to the point. No longer than 33 chars, please.
11566 */
11567static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11568{
11569 /*
11570 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11571 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11572 *
11573 * Note! This is the reverse operation of what vmxHCPreRunGuestDebugStateUpdate
11574 * does. Add, change or remove entries in both places, keeping the same ordering.
11575 *
11576 * Added/removed events must also be reflected in the next section
11577 * where we dispatch dtrace events.
11578 */
11579 bool fDtrace1 = false;
11580 bool fDtrace2 = false;
11581 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11582 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11583 uint32_t uEventArg = 0;
11584#define SET_EXIT(a_EventSubName) \
11585 do { \
11586 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11587 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11588 } while (0)
11589#define SET_BOTH(a_EventSubName) \
11590 do { \
11591 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11592 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11593 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11594 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11595 } while (0)
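 /* For example, the SET_BOTH(CPUID) case below boils down to:
        enmEvent1 = DBGFEVENT_INSTR_CPUID;
        enmEvent2 = DBGFEVENT_EXIT_CPUID;
        fDtrace1  = VBOXVMM_INSTR_CPUID_ENABLED();
        fDtrace2  = VBOXVMM_EXIT_CPUID_ENABLED(); */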
11596 switch (uExitReason)
11597 {
11598 case VMX_EXIT_MTF:
11599 return vmxHCExitMtf(pVCpu, pVmxTransient);
11600
11601 case VMX_EXIT_XCPT_OR_NMI:
11602 {
11603 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11604 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11605 {
11606 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11607 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11608 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11609 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11610 {
11611 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11612 {
11613 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE>(pVCpu, pVmxTransient);
11614 uEventArg = pVmxTransient->uExitIntErrorCode;
11615 }
11616 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11617 switch (enmEvent1)
11618 {
11619 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11620 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11621 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11622 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11623 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11624 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11625 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11626 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11627 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11628 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11629 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11630 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11631 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11632 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11633 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11634 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11635 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11636 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11637 default: break;
11638 }
11639 }
11640 else
11641 AssertFailed();
11642 break;
11643
11644 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11645 uEventArg = idxVector;
11646 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11647 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11648 break;
11649 }
11650 break;
11651 }
11652
11653 case VMX_EXIT_TRIPLE_FAULT:
11654 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11655 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11656 break;
11657 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11658 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11659 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11660 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11661 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11662
11663 /* Instruction specific VM-exits: */
11664 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11665 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11666 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11667 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11668 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11669 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11670 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11671 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11672 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11673 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11674 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11675 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11676 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11677 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11678 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11679 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11680 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11681 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11682 case VMX_EXIT_MOV_CRX:
11683 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11684 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11685 SET_BOTH(CRX_READ);
11686 else
11687 SET_BOTH(CRX_WRITE);
11688 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11689 break;
11690 case VMX_EXIT_MOV_DRX:
11691 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11692 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11693 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11694 SET_BOTH(DRX_READ);
11695 else
11696 SET_BOTH(DRX_WRITE);
11697 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11698 break;
11699 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11700 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11701 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11702 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11703 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11704 case VMX_EXIT_GDTR_IDTR_ACCESS:
11705 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11706 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11707 {
11708 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11709 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11710 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11711 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11712 }
11713 break;
11714
11715 case VMX_EXIT_LDTR_TR_ACCESS:
11716 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11717 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11718 {
11719 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11720 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11721 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11722 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11723 }
11724 break;
11725
11726 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11727 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11728 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11729 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11730 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11731 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11732 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11733 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11734 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11735 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11736 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11737
11738 /* Events that aren't relevant at this point. */
11739 case VMX_EXIT_EXT_INT:
11740 case VMX_EXIT_INT_WINDOW:
11741 case VMX_EXIT_NMI_WINDOW:
11742 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11743 case VMX_EXIT_PREEMPT_TIMER:
11744 case VMX_EXIT_IO_INSTR:
11745 break;
11746
11747 /* Errors and unexpected events. */
11748 case VMX_EXIT_INIT_SIGNAL:
11749 case VMX_EXIT_SIPI:
11750 case VMX_EXIT_IO_SMI:
11751 case VMX_EXIT_SMI:
11752 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11753 case VMX_EXIT_ERR_MSR_LOAD:
11754 case VMX_EXIT_ERR_MACHINE_CHECK:
11755 case VMX_EXIT_PML_FULL:
11756 case VMX_EXIT_VIRTUALIZED_EOI:
11757 break;
11758
11759 default:
11760 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11761 break;
11762 }
11763#undef SET_BOTH
11764#undef SET_EXIT
11765
11766 /*
11767 * Dtrace tracepoints go first. We do them here in one place so we don't
11768 * have to repeat the guest-state saving and related setup a few dozen times.
11769 * The downside is that we've got to repeat the switch, though this time
11770 * we use enmEvent since the probes are a subset of what DBGF does.
11771 */
11772 if (fDtrace1 || fDtrace2)
11773 {
11774 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11775 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11776 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; RT_NOREF(pCtx); /* Shut up Clang 13. */
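 /* RT_NOREF marks pCtx as intentionally possibly-unused: when all of the probes below are
    compiled out, nothing references it and Clang would otherwise warn. */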
11777 switch (enmEvent1)
11778 {
11779 /** @todo consider which extra parameters would be helpful for each probe. */
11780 case DBGFEVENT_END: break;
11781 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11782 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11783 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11784 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11785 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11786 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11787 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11788 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11789 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11790 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11791 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11792 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11793 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11794 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11795 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11796 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11797 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11798 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11799 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11800 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11801 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11802 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11803 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11804 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11805 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11806 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11807 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11808 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11809 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11810 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11811 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11812 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11813 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11814 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11815 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11816 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11817 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11818 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11819 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11820 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11821 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11822 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11823 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11824 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11825 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11826 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11827 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11828 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11829 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11830 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11831 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11832 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11833 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11834 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11835 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11836 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11837 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11838 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11839 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11840 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11841 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11842 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11843 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11844 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11845 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11846 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11847 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11848 }
11849 switch (enmEvent2)
11850 {
11851 /** @todo consider which extra parameters would be helpful for each probe. */
11852 case DBGFEVENT_END: break;
11853 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11854 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11855 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11856 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11857 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11858 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11859 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11860 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11861 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11862 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11863 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11864 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11865 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11866 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11867 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11868 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11869 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11870 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11871 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11872 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11873 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11874 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11875 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11876 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11877 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11878 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11879 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11880 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11881 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11882 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11883 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11884 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11885 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11886 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11887 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11888 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11889 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11890 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11891 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11892 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11893 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11894 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11895 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11896 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11897 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11898 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11899 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11900 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11901 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11902 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11903 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11904 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11905 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11906 }
11907 }
11908
11909 /*
11910 * Fire off the DBGF event, if enabled (our check here is just a quick one,
11911 * the DBGF call will do a full check).
11912 *
11913 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11914 * Note! If we have two events, we prioritize the first, i.e. the instruction
11915 * one, in order to avoid event nesting.
11916 */
11917 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11918 if ( enmEvent1 != DBGFEVENT_END
11919 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11920 {
11921 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11922 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11923 if (rcStrict != VINF_SUCCESS)
11924 return rcStrict;
11925 }
11926 else if ( enmEvent2 != DBGFEVENT_END
11927 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11928 {
11929 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11930 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11931 if (rcStrict != VINF_SUCCESS)
11932 return rcStrict;
11933 }
11934
11935 return VINF_SUCCESS;
11936}
11937
11938
11939/**
11940 * Single-stepping VM-exit filtering.
11941 *
11942 * This preprocesses the VM-exit and decides whether we've gotten far
11943 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11944 * handling is performed.
11945 *
11946 * @returns Strict VBox status code (i.e. informational status codes too).
11947 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11948 * @param pVmxTransient The VMX-transient structure.
11949 * @param pDbgState The debug state.
11950 */
11951DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11952{
11953 /*
11954 * Expensive (saves context) generic dtrace VM-exit probe.
11955 */
11956 uint32_t const uExitReason = pVmxTransient->uExitReason;
11957 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11958 { /* more likely */ }
11959 else
11960 {
11961 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11962 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11963 AssertRC(rc);
11964 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11965 }
11966
11967#ifndef IN_NEM_DARWIN
11968 /*
11969 * Check for host NMI, just to get that out of the way.
11970 */
11971 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11972 { /* normally likely */ }
11973 else
11974 {
11975 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
11976 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11977 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11978 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11979 }
11980#endif
11981
11982 /*
11983 * Check for a single-stepping event if we're stepping.
11984 */
11985 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11986 {
11987 switch (uExitReason)
11988 {
11989 case VMX_EXIT_MTF:
11990 return vmxHCExitMtf(pVCpu, pVmxTransient);
11991
11992 /* Various events: */
11993 case VMX_EXIT_XCPT_OR_NMI:
11994 case VMX_EXIT_EXT_INT:
11995 case VMX_EXIT_TRIPLE_FAULT:
11996 case VMX_EXIT_INT_WINDOW:
11997 case VMX_EXIT_NMI_WINDOW:
11998 case VMX_EXIT_TASK_SWITCH:
11999 case VMX_EXIT_TPR_BELOW_THRESHOLD:
12000 case VMX_EXIT_APIC_ACCESS:
12001 case VMX_EXIT_EPT_VIOLATION:
12002 case VMX_EXIT_EPT_MISCONFIG:
12003 case VMX_EXIT_PREEMPT_TIMER:
12004
12005 /* Instruction specific VM-exits: */
12006 case VMX_EXIT_CPUID:
12007 case VMX_EXIT_GETSEC:
12008 case VMX_EXIT_HLT:
12009 case VMX_EXIT_INVD:
12010 case VMX_EXIT_INVLPG:
12011 case VMX_EXIT_RDPMC:
12012 case VMX_EXIT_RDTSC:
12013 case VMX_EXIT_RSM:
12014 case VMX_EXIT_VMCALL:
12015 case VMX_EXIT_VMCLEAR:
12016 case VMX_EXIT_VMLAUNCH:
12017 case VMX_EXIT_VMPTRLD:
12018 case VMX_EXIT_VMPTRST:
12019 case VMX_EXIT_VMREAD:
12020 case VMX_EXIT_VMRESUME:
12021 case VMX_EXIT_VMWRITE:
12022 case VMX_EXIT_VMXOFF:
12023 case VMX_EXIT_VMXON:
12024 case VMX_EXIT_MOV_CRX:
12025 case VMX_EXIT_MOV_DRX:
12026 case VMX_EXIT_IO_INSTR:
12027 case VMX_EXIT_RDMSR:
12028 case VMX_EXIT_WRMSR:
12029 case VMX_EXIT_MWAIT:
12030 case VMX_EXIT_MONITOR:
12031 case VMX_EXIT_PAUSE:
12032 case VMX_EXIT_GDTR_IDTR_ACCESS:
12033 case VMX_EXIT_LDTR_TR_ACCESS:
12034 case VMX_EXIT_INVEPT:
12035 case VMX_EXIT_RDTSCP:
12036 case VMX_EXIT_INVVPID:
12037 case VMX_EXIT_WBINVD:
12038 case VMX_EXIT_XSETBV:
12039 case VMX_EXIT_RDRAND:
12040 case VMX_EXIT_INVPCID:
12041 case VMX_EXIT_VMFUNC:
12042 case VMX_EXIT_RDSEED:
12043 case VMX_EXIT_XSAVES:
12044 case VMX_EXIT_XRSTORS:
12045 {
12046 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
12047 AssertRCReturn(rc, rc);
12048 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
12049 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
12050 return VINF_EM_DBG_STEPPED;
12051 break;
12052 }
12053
12054 /* Errors and unexpected events: */
12055 case VMX_EXIT_INIT_SIGNAL:
12056 case VMX_EXIT_SIPI:
12057 case VMX_EXIT_IO_SMI:
12058 case VMX_EXIT_SMI:
12059 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
12060 case VMX_EXIT_ERR_MSR_LOAD:
12061 case VMX_EXIT_ERR_MACHINE_CHECK:
12062 case VMX_EXIT_PML_FULL:
12063 case VMX_EXIT_VIRTUALIZED_EOI:
12064 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault-like, so I guess we must process it? */
12065 break;
12066
12067 default:
12068 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
12069 break;
12070 }
12071 }
12072
12073 /*
12074 * Check for debugger event breakpoints and dtrace probes.
12075 */
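 /* bmExitsToCheck is a bitmap of 32-bit words (hence the '* 32U' bound); only exit reasons
    flagged by vmxHCPreRunGuestDebugStateUpdate get the more expensive
    vmxHCHandleExitDtraceEvents treatment. */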
12076 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
12077 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
12078 {
12079 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
12080 if (rcStrict != VINF_SUCCESS)
12081 return rcStrict;
12082 }
12083
12084 /*
12085 * Normal processing.
12086 */
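 /* With HMVMX_USE_FUNCTION_TABLE defined, the exit reason indexes straight into the
    g_aVMExitHandlers table; otherwise the switch-based vmxHCHandleExit dispatcher is used. */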
12087#ifdef HMVMX_USE_FUNCTION_TABLE
12088 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
12089#else
12090 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
12091#endif
12092}
12093
12094/** @} */