VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@ 97153

Last change on this file since 97153 was 97094, checked in by vboxsync, 2 years ago

VMM/HMVMXR0: Reduce the number of fRealOnV86Active conditionals. We can bunch these up since they are not typically used, both because of VM config and because guests typically don't spend much time in real mode.

1/* $Id: VMXAllTemplate.cpp.h 97094 2022-10-11 20:46:24Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
39#endif
40
41/** Enables condensing of VMREAD instructions, see vmxHCReadToTransient(). */
42#define HMVMX_WITH_CONDENSED_VMREADS
43
44/** Use the function table. */
45#define HMVMX_USE_FUNCTION_TABLE
46
47/** Determine which tagged-TLB flush handler to use. */
48#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
49#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
50#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
51#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
52
53/** Assert that all the given fields have been read from the VMCS. */
54#ifdef VBOX_STRICT
55# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
56 do { \
57 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
58 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
59 } while (0)
60#else
61# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
62#endif
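/*
 * A minimal usage sketch: an exit handler that consumes pVmxTransient->uExitQual
 * would typically assert that the field has been read into the transient structure
 * first, using the same HMVMX_READ_XXX flags accepted by vmxHCReadToTransient()
 * further below:
 *
 *     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
 */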
63
64/**
65 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
66 * guest using hardware-assisted VMX.
67 *
68 * This excludes state like GPRs (other than RSP), which are always swapped
69 * and restored across the world-switch, and also registers like the EFER
70 * MSR which cannot be modified by the guest without causing a VM-exit.
71 */
72#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
73 | CPUMCTX_EXTRN_RFLAGS \
74 | CPUMCTX_EXTRN_RSP \
75 | CPUMCTX_EXTRN_SREG_MASK \
76 | CPUMCTX_EXTRN_TABLE_MASK \
77 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
78 | CPUMCTX_EXTRN_SYSCALL_MSRS \
79 | CPUMCTX_EXTRN_SYSENTER_MSRS \
80 | CPUMCTX_EXTRN_TSC_AUX \
81 | CPUMCTX_EXTRN_OTHER_MSRS \
82 | CPUMCTX_EXTRN_CR0 \
83 | CPUMCTX_EXTRN_CR3 \
84 | CPUMCTX_EXTRN_CR4 \
85 | CPUMCTX_EXTRN_DR7 \
86 | CPUMCTX_EXTRN_HWVIRT \
87 | CPUMCTX_EXTRN_INHIBIT_INT \
88 | CPUMCTX_EXTRN_INHIBIT_NMI)
89
90/**
91 * Exception bitmap mask for real-mode guests (real-on-v86).
92 *
93 * We need to intercept all exceptions manually, except:
94 * - \#AC and \#DB, which are always intercepted anyway (to prevent the CPU from
95 * deadlocking due to bugs in Intel CPUs) and so are not part of this mask.
96 * - \#PF, which need not be intercepted even in real-mode if we have nested
97 * paging support.
98 */
99#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
100 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
101 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
102 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
103 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
104 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
105 | RT_BIT(X86_XCPT_XF))
106
107/** Maximum VM-instruction error number. */
108#define HMVMX_INSTR_ERROR_MAX 28
109
110/** Profiling macro. */
111#ifdef HM_PROFILE_EXIT_DISPATCH
112# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
113# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
114#else
115# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
116# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
117#endif
118
119#ifndef IN_NEM_DARWIN
120/** Assert that preemption is disabled or covered by thread-context hooks. */
121# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
122 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
123
124/** Assert that we haven't migrated CPUs when thread-context hooks are not
125 * used. */
126# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
127 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
128 ("Illegal migration! Entered on CPU %u Current %u\n", \
129 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
130#else
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
132# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
133#endif
134
135/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
136 * context. */
137#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
138 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
139 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
140
141/** Log the VM-exit reason with an easily visible marker to identify it in a
142 * potential sea of logging data. */
143#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
144 do { \
145 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
146 HMGetVmxExitName(a_uExitReason))); \
147 } while (0)
148
149
150/*********************************************************************************************************************************
151* Structures and Typedefs *
152*********************************************************************************************************************************/
153/**
154 * Memory operand read or write access.
155 */
156typedef enum VMXMEMACCESS
157{
158 VMXMEMACCESS_READ = 0,
159 VMXMEMACCESS_WRITE = 1
160} VMXMEMACCESS;
161
162
163/**
164 * VMX VM-exit handler.
165 *
166 * @returns Strict VBox status code (i.e. informational status codes too).
167 * @param pVCpu The cross context virtual CPU structure.
168 * @param pVmxTransient The VMX-transient structure.
169 */
170#ifndef HMVMX_USE_FUNCTION_TABLE
171typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
172#else
173typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
174/** Pointer to VM-exit handler. */
175typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
176#endif
177
178/**
179 * VMX VM-exit handler, non-strict status code.
180 *
181 * This is generally the same as FNVMXEXITHANDLER; the NSRC bit is just FYI.
182 *
183 * @returns VBox status code, no informational status code returned.
184 * @param pVCpu The cross context virtual CPU structure.
185 * @param pVmxTransient The VMX-transient structure.
186 *
187 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
188 * use of that status code will be replaced with VINF_EM_SOMETHING
189 * later when switching over to IEM.
190 */
191#ifndef HMVMX_USE_FUNCTION_TABLE
192typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
193#else
194typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
195#endif
196
197
198/*********************************************************************************************************************************
199* Internal Functions *
200*********************************************************************************************************************************/
201#ifndef HMVMX_USE_FUNCTION_TABLE
202DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
204# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
205#else
206# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
207# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
208#endif
209#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
210DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
211#endif
212
213static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
214
215/** @name VM-exit handler prototypes.
216 * @{
217 */
218static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
219static FNVMXEXITHANDLER vmxHCExitExtInt;
220static FNVMXEXITHANDLER vmxHCExitTripleFault;
221static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
222static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
223static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
224static FNVMXEXITHANDLER vmxHCExitCpuid;
225static FNVMXEXITHANDLER vmxHCExitGetsec;
226static FNVMXEXITHANDLER vmxHCExitHlt;
227static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
228static FNVMXEXITHANDLER vmxHCExitInvlpg;
229static FNVMXEXITHANDLER vmxHCExitRdpmc;
230static FNVMXEXITHANDLER vmxHCExitVmcall;
231#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
232static FNVMXEXITHANDLER vmxHCExitVmclear;
233static FNVMXEXITHANDLER vmxHCExitVmlaunch;
234static FNVMXEXITHANDLER vmxHCExitVmptrld;
235static FNVMXEXITHANDLER vmxHCExitVmptrst;
236static FNVMXEXITHANDLER vmxHCExitVmread;
237static FNVMXEXITHANDLER vmxHCExitVmresume;
238static FNVMXEXITHANDLER vmxHCExitVmwrite;
239static FNVMXEXITHANDLER vmxHCExitVmxoff;
240static FNVMXEXITHANDLER vmxHCExitVmxon;
241static FNVMXEXITHANDLER vmxHCExitInvvpid;
242# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
243static FNVMXEXITHANDLER vmxHCExitInvept;
244# endif
245#endif
246static FNVMXEXITHANDLER vmxHCExitRdtsc;
247static FNVMXEXITHANDLER vmxHCExitMovCRx;
248static FNVMXEXITHANDLER vmxHCExitMovDRx;
249static FNVMXEXITHANDLER vmxHCExitIoInstr;
250static FNVMXEXITHANDLER vmxHCExitRdmsr;
251static FNVMXEXITHANDLER vmxHCExitWrmsr;
252static FNVMXEXITHANDLER vmxHCExitMwait;
253static FNVMXEXITHANDLER vmxHCExitMtf;
254static FNVMXEXITHANDLER vmxHCExitMonitor;
255static FNVMXEXITHANDLER vmxHCExitPause;
256static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
257static FNVMXEXITHANDLER vmxHCExitApicAccess;
258static FNVMXEXITHANDLER vmxHCExitEptViolation;
259static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
260static FNVMXEXITHANDLER vmxHCExitRdtscp;
261static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
262static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
263static FNVMXEXITHANDLER vmxHCExitXsetbv;
264static FNVMXEXITHANDLER vmxHCExitInvpcid;
265#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
266static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
267#endif
268static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
269static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
270/** @} */
271
272#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
273/** @name Nested-guest VM-exit handler prototypes.
274 * @{
275 */
276static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
277static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
278static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
279static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
280static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
281static FNVMXEXITHANDLER vmxHCExitHltNested;
282static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
283static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
284static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
285static FNVMXEXITHANDLER vmxHCExitRdtscNested;
286static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
287static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
288static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
289static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
290static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
291static FNVMXEXITHANDLER vmxHCExitMwaitNested;
292static FNVMXEXITHANDLER vmxHCExitMtfNested;
293static FNVMXEXITHANDLER vmxHCExitMonitorNested;
294static FNVMXEXITHANDLER vmxHCExitPauseNested;
295static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
296static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
297static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
298static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
299static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
301static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
302static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
303static FNVMXEXITHANDLER vmxHCExitInstrNested;
304static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
305# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
306static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
307static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
308# endif
309/** @} */
310#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
311
312
313/*********************************************************************************************************************************
314* Global Variables *
315*********************************************************************************************************************************/
316#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
317/**
318 * Array of all VMCS fields.
319 * Any fields added to the VT-x spec. should be added here.
320 *
321 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
322 * of nested-guests.
323 */
324static const uint32_t g_aVmcsFields[] =
325{
326 /* 16-bit control fields. */
327 VMX_VMCS16_VPID,
328 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
329 VMX_VMCS16_EPTP_INDEX,
330
331 /* 16-bit guest-state fields. */
332 VMX_VMCS16_GUEST_ES_SEL,
333 VMX_VMCS16_GUEST_CS_SEL,
334 VMX_VMCS16_GUEST_SS_SEL,
335 VMX_VMCS16_GUEST_DS_SEL,
336 VMX_VMCS16_GUEST_FS_SEL,
337 VMX_VMCS16_GUEST_GS_SEL,
338 VMX_VMCS16_GUEST_LDTR_SEL,
339 VMX_VMCS16_GUEST_TR_SEL,
340 VMX_VMCS16_GUEST_INTR_STATUS,
341 VMX_VMCS16_GUEST_PML_INDEX,
342
 343 /* 16-bit host-state fields. */
344 VMX_VMCS16_HOST_ES_SEL,
345 VMX_VMCS16_HOST_CS_SEL,
346 VMX_VMCS16_HOST_SS_SEL,
347 VMX_VMCS16_HOST_DS_SEL,
348 VMX_VMCS16_HOST_FS_SEL,
349 VMX_VMCS16_HOST_GS_SEL,
350 VMX_VMCS16_HOST_TR_SEL,
351
352 /* 64-bit control fields. */
353 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
354 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
355 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
356 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
357 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
358 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
359 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
360 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
361 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
362 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
363 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
364 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
365 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
366 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
367 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
368 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
369 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
370 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
371 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
372 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
373 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
374 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
375 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
376 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
377 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
378 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
379 VMX_VMCS64_CTRL_EPTP_FULL,
380 VMX_VMCS64_CTRL_EPTP_HIGH,
381 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
382 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
383 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
384 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
385 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
386 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
387 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
388 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
389 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
390 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
391 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
392 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
393 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
394 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
395 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
396 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
397 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
398 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
399 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
400 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
401 VMX_VMCS64_CTRL_SPPTP_FULL,
402 VMX_VMCS64_CTRL_SPPTP_HIGH,
403 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
404 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
405 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
406 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
407 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
408 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
409
410 /* 64-bit read-only data fields. */
411 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
412 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
413
414 /* 64-bit guest-state fields. */
415 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
416 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
417 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
418 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
419 VMX_VMCS64_GUEST_PAT_FULL,
420 VMX_VMCS64_GUEST_PAT_HIGH,
421 VMX_VMCS64_GUEST_EFER_FULL,
422 VMX_VMCS64_GUEST_EFER_HIGH,
423 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
424 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
425 VMX_VMCS64_GUEST_PDPTE0_FULL,
426 VMX_VMCS64_GUEST_PDPTE0_HIGH,
427 VMX_VMCS64_GUEST_PDPTE1_FULL,
428 VMX_VMCS64_GUEST_PDPTE1_HIGH,
429 VMX_VMCS64_GUEST_PDPTE2_FULL,
430 VMX_VMCS64_GUEST_PDPTE2_HIGH,
431 VMX_VMCS64_GUEST_PDPTE3_FULL,
432 VMX_VMCS64_GUEST_PDPTE3_HIGH,
433 VMX_VMCS64_GUEST_BNDCFGS_FULL,
434 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
435 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
436 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
437 VMX_VMCS64_GUEST_PKRS_FULL,
438 VMX_VMCS64_GUEST_PKRS_HIGH,
439
440 /* 64-bit host-state fields. */
441 VMX_VMCS64_HOST_PAT_FULL,
442 VMX_VMCS64_HOST_PAT_HIGH,
443 VMX_VMCS64_HOST_EFER_FULL,
444 VMX_VMCS64_HOST_EFER_HIGH,
445 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
446 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
447 VMX_VMCS64_HOST_PKRS_FULL,
448 VMX_VMCS64_HOST_PKRS_HIGH,
449
450 /* 32-bit control fields. */
451 VMX_VMCS32_CTRL_PIN_EXEC,
452 VMX_VMCS32_CTRL_PROC_EXEC,
453 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
454 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
455 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
456 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
457 VMX_VMCS32_CTRL_EXIT,
458 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
459 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
460 VMX_VMCS32_CTRL_ENTRY,
461 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
462 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
463 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
464 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
465 VMX_VMCS32_CTRL_TPR_THRESHOLD,
466 VMX_VMCS32_CTRL_PROC_EXEC2,
467 VMX_VMCS32_CTRL_PLE_GAP,
468 VMX_VMCS32_CTRL_PLE_WINDOW,
469
 470 /* 32-bit read-only fields. */
471 VMX_VMCS32_RO_VM_INSTR_ERROR,
472 VMX_VMCS32_RO_EXIT_REASON,
473 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
474 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
475 VMX_VMCS32_RO_IDT_VECTORING_INFO,
476 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
477 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
478 VMX_VMCS32_RO_EXIT_INSTR_INFO,
479
480 /* 32-bit guest-state fields. */
481 VMX_VMCS32_GUEST_ES_LIMIT,
482 VMX_VMCS32_GUEST_CS_LIMIT,
483 VMX_VMCS32_GUEST_SS_LIMIT,
484 VMX_VMCS32_GUEST_DS_LIMIT,
485 VMX_VMCS32_GUEST_FS_LIMIT,
486 VMX_VMCS32_GUEST_GS_LIMIT,
487 VMX_VMCS32_GUEST_LDTR_LIMIT,
488 VMX_VMCS32_GUEST_TR_LIMIT,
489 VMX_VMCS32_GUEST_GDTR_LIMIT,
490 VMX_VMCS32_GUEST_IDTR_LIMIT,
491 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
492 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
493 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
494 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
495 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
496 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
497 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
498 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
499 VMX_VMCS32_GUEST_INT_STATE,
500 VMX_VMCS32_GUEST_ACTIVITY_STATE,
501 VMX_VMCS32_GUEST_SMBASE,
502 VMX_VMCS32_GUEST_SYSENTER_CS,
503 VMX_VMCS32_PREEMPT_TIMER_VALUE,
504
505 /* 32-bit host-state fields. */
506 VMX_VMCS32_HOST_SYSENTER_CS,
507
508 /* Natural-width control fields. */
509 VMX_VMCS_CTRL_CR0_MASK,
510 VMX_VMCS_CTRL_CR4_MASK,
511 VMX_VMCS_CTRL_CR0_READ_SHADOW,
512 VMX_VMCS_CTRL_CR4_READ_SHADOW,
513 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
514 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
515 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
516 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
517
518 /* Natural-width read-only data fields. */
519 VMX_VMCS_RO_EXIT_QUALIFICATION,
520 VMX_VMCS_RO_IO_RCX,
521 VMX_VMCS_RO_IO_RSI,
522 VMX_VMCS_RO_IO_RDI,
523 VMX_VMCS_RO_IO_RIP,
524 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
525
 526 /* Natural-width guest-state fields. */
527 VMX_VMCS_GUEST_CR0,
528 VMX_VMCS_GUEST_CR3,
529 VMX_VMCS_GUEST_CR4,
530 VMX_VMCS_GUEST_ES_BASE,
531 VMX_VMCS_GUEST_CS_BASE,
532 VMX_VMCS_GUEST_SS_BASE,
533 VMX_VMCS_GUEST_DS_BASE,
534 VMX_VMCS_GUEST_FS_BASE,
535 VMX_VMCS_GUEST_GS_BASE,
536 VMX_VMCS_GUEST_LDTR_BASE,
537 VMX_VMCS_GUEST_TR_BASE,
538 VMX_VMCS_GUEST_GDTR_BASE,
539 VMX_VMCS_GUEST_IDTR_BASE,
540 VMX_VMCS_GUEST_DR7,
541 VMX_VMCS_GUEST_RSP,
542 VMX_VMCS_GUEST_RIP,
543 VMX_VMCS_GUEST_RFLAGS,
544 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
545 VMX_VMCS_GUEST_SYSENTER_ESP,
546 VMX_VMCS_GUEST_SYSENTER_EIP,
547 VMX_VMCS_GUEST_S_CET,
548 VMX_VMCS_GUEST_SSP,
549 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
550
551 /* Natural-width host-state fields */
552 VMX_VMCS_HOST_CR0,
553 VMX_VMCS_HOST_CR3,
554 VMX_VMCS_HOST_CR4,
555 VMX_VMCS_HOST_FS_BASE,
556 VMX_VMCS_HOST_GS_BASE,
557 VMX_VMCS_HOST_TR_BASE,
558 VMX_VMCS_HOST_GDTR_BASE,
559 VMX_VMCS_HOST_IDTR_BASE,
560 VMX_VMCS_HOST_SYSENTER_ESP,
561 VMX_VMCS_HOST_SYSENTER_EIP,
562 VMX_VMCS_HOST_RSP,
563 VMX_VMCS_HOST_RIP,
564 VMX_VMCS_HOST_S_CET,
565 VMX_VMCS_HOST_SSP,
566 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
567};
568#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
569
570#ifdef HMVMX_USE_FUNCTION_TABLE
571/**
572 * VMX_EXIT dispatch table.
573 */
574static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
575{
576 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
577 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
578 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
579 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
580 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
581 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
582 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
583 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
584 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
585 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
586 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
587 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
588 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
589 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
590 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
591 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
592 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
593 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
594 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
595#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
596 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
597 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
598 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
599 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
600 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
601 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
602 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
603 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
604 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
605#else
606 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
607 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
608 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
609 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
610 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
611 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
612 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
613 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
614 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
615#endif
616 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
617 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
618 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
619 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
620 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
621 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
622 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
623 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
624 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
625 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
626 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
627 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
628 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
629 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
630 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
631 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
632 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
633 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
634 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
635 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
636 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
637 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
638#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
639 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
640#else
641 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
642#endif
643 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
644 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
645#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
646 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
647#else
648 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
649#endif
650 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
651 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
652 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
653 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
654 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
655 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
656 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
657 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
658 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
659 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
660 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
661 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
662 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
663 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
664 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
665 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
666};
667#endif /* HMVMX_USE_FUNCTION_TABLE */
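/*
 * Dispatch sketch (assumes HMVMX_USE_FUNCTION_TABLE is defined): the common exit
 * path indexes this table by the exit reason read from the VMCS, roughly:
 *
 *     if (RT_LIKELY(uExitReason <= VMX_EXIT_MAX))
 *         return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
 *     return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
 *
 * The actual dispatcher elsewhere in this file performs additional checks before
 * calling into the table.
 */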
668
669#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
670static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
671{
672 /* 0 */ "(Not Used)",
673 /* 1 */ "VMCALL executed in VMX root operation.",
674 /* 2 */ "VMCLEAR with invalid physical address.",
675 /* 3 */ "VMCLEAR with VMXON pointer.",
676 /* 4 */ "VMLAUNCH with non-clear VMCS.",
677 /* 5 */ "VMRESUME with non-launched VMCS.",
678 /* 6 */ "VMRESUME after VMXOFF",
679 /* 7 */ "VM-entry with invalid control fields.",
680 /* 8 */ "VM-entry with invalid host state fields.",
681 /* 9 */ "VMPTRLD with invalid physical address.",
682 /* 10 */ "VMPTRLD with VMXON pointer.",
683 /* 11 */ "VMPTRLD with incorrect revision identifier.",
684 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
685 /* 13 */ "VMWRITE to read-only VMCS component.",
686 /* 14 */ "(Not Used)",
687 /* 15 */ "VMXON executed in VMX root operation.",
688 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
689 /* 17 */ "VM-entry with non-launched executing VMCS.",
690 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
691 /* 19 */ "VMCALL with non-clear VMCS.",
692 /* 20 */ "VMCALL with invalid VM-exit control fields.",
693 /* 21 */ "(Not Used)",
694 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
695 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
696 /* 24 */ "VMCALL with invalid SMM-monitor features.",
697 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
698 /* 26 */ "VM-entry with events blocked by MOV SS.",
699 /* 27 */ "(Not Used)",
700 /* 28 */ "Invalid operand to INVEPT/INVVPID."
701};
702#endif /* VBOX_STRICT && LOG_ENABLED */
703
704
705/**
706 * Gets the CR0 guest/host mask.
707 *
708 * These bits typically do not change through the lifetime of a VM. Any bit set in
709 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
710 * by the guest.
711 *
712 * @returns The CR0 guest/host mask.
713 * @param pVCpu The cross context virtual CPU structure.
714 */
715static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
716{
717 /*
718 * Guest modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW)
719 * and to CR0 bits that we require for shadow paging (PG) must cause VM-exits.
720 *
721 * Furthermore, modifications to any bits that are reserved/unspecified currently
722 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
723 * when future CPUs specify and use currently reserved/unspecified bits.
724 */
725 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
726 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
727 * and @bugref{6944}. */
728 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
729 return ( X86_CR0_PE
730 | X86_CR0_NE
731 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
732 | X86_CR0_PG
733 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
734}
735
736
737/**
738 * Gets the CR4 guest/host mask.
739 *
740 * These bits typically do not change through the lifetime of a VM. Any bit set in
741 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
742 * by the guest.
743 *
744 * @returns The CR4 guest/host mask.
745 * @param pVCpu The cross context virtual CPU structure.
746 */
747static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
748{
749 /*
750 * We construct a mask of all CR4 bits that the guest can modify without causing
751 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
752 * a VM-exit when the guest attempts to modify them while executing using
753 * hardware-assisted VMX.
754 *
755 * When a feature is not exposed to the guest (but may be present on the host),
756 * we want to intercept guest modifications to the bit so we can emulate proper
757 * behavior (e.g., #GP).
758 *
759 * Furthermore, only modifications to those bits that don't require immediate
760 * emulation are allowed. For example, PCIDE is excluded because its behavior
761 * depends on CR3, which might not always be the guest value while executing
762 * using hardware-assisted VMX.
763 */
764 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
765 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
766#ifdef IN_NEM_DARWIN
767 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
768#endif
769 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
770
771 /*
772 * Paranoia.
773 * Ensure features exposed to the guest are present on the host.
774 */
775 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
776#ifdef IN_NEM_DARWIN
777 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
778#endif
779 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
780
781 uint64_t const fGstMask = X86_CR4_PVI
782 | X86_CR4_TSD
783 | X86_CR4_DE
784 | X86_CR4_MCE
785 | X86_CR4_PCE
786 | X86_CR4_OSXMMEEXCPT
787 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
788#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
789 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
790 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
791#endif
792 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
793 return ~fGstMask;
794}
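/*
 * Sketch of how these fixed CR0/CR4 masks are typically consumed (the actual call
 * sites live elsewhere in this file): the mask is committed to the corresponding
 * guest/host mask VMCS field so that guest writes to host-owned bits trigger a
 * VM-exit. Assuming a VMX_VMCS_WRITE_NW accessor mirroring the VMX_VMCS_READ_NW
 * used further below:
 *
 *     uint64_t const fCr4Mask = vmxHCGetFixedCr4Mask(pVCpu);
 *     int const rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, fCr4Mask);
 *     AssertRC(rc);
 */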
795
796
797/**
798 * Adds one or more exceptions to the exception bitmap and commits it to the current
799 * VMCS.
800 *
801 * @param pVCpu The cross context virtual CPU structure.
802 * @param pVmxTransient The VMX-transient structure.
803 * @param uXcptMask The exception(s) to add.
804 */
805static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
806{
807 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
808 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
809 if ((uXcptBitmap & uXcptMask) != uXcptMask)
810 {
811 uXcptBitmap |= uXcptMask;
812 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
813 AssertRC(rc);
814 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
815 }
816}
817
818
819/**
820 * Adds an exception to the exception bitmap and commits it to the current VMCS.
821 *
822 * @param pVCpu The cross context virtual CPU structure.
823 * @param pVmxTransient The VMX-transient structure.
824 * @param uXcpt The exception to add.
825 */
826static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
827{
828 Assert(uXcpt <= X86_XCPT_LAST);
829 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
830}
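/*
 * Usage sketch: to start trapping guest \#GP, for instance, a caller would invoke
 *
 *     vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 *
 * which ORs RT_BIT_32(X86_XCPT_GP) into the cached exception bitmap and commits it
 * to the current VMCS only if the bit was not already set.
 */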
831
832
833/**
834 * Removes one or more exceptions from the exception bitmap and commits it to the
835 * current VMCS.
836 *
837 * This takes care of not removing the exception intercept if a nested-guest
838 * requires the exception to be intercepted.
839 *
840 * @returns VBox status code.
841 * @param pVCpu The cross context virtual CPU structure.
842 * @param pVmxTransient The VMX-transient structure.
843 * @param uXcptMask The exception(s) to remove.
844 */
845static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
846{
847 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
848 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
849 if (u32XcptBitmap & uXcptMask)
850 {
851#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
852 if (!pVmxTransient->fIsNestedGuest)
853 { /* likely */ }
854 else
855 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
856#endif
857#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
858 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
859 | RT_BIT(X86_XCPT_DE)
860 | RT_BIT(X86_XCPT_NM)
861 | RT_BIT(X86_XCPT_TS)
862 | RT_BIT(X86_XCPT_UD)
863 | RT_BIT(X86_XCPT_NP)
864 | RT_BIT(X86_XCPT_SS)
865 | RT_BIT(X86_XCPT_GP)
866 | RT_BIT(X86_XCPT_PF)
867 | RT_BIT(X86_XCPT_MF));
868#elif defined(HMVMX_ALWAYS_TRAP_PF)
869 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
870#endif
871 if (uXcptMask)
872 {
873 /* Validate we are not removing any essential exception intercepts. */
874#ifndef IN_NEM_DARWIN
875 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
876#else
877 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
878#endif
879 NOREF(pVCpu);
880 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
881 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
882
883 /* Remove it from the exception bitmap. */
884 u32XcptBitmap &= ~uXcptMask;
885
886 /* Commit and update the cache if necessary. */
887 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
888 {
889 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
890 AssertRC(rc);
891 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
892 }
893 }
894 }
895 return VINF_SUCCESS;
896}
897
898
899/**
900 * Removes an exception from the exception bitmap and commits it to the current
901 * VMCS.
902 *
903 * @returns VBox status code.
904 * @param pVCpu The cross context virtual CPU structure.
905 * @param pVmxTransient The VMX-transient structure.
906 * @param uXcpt The exception to remove.
907 */
908static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
909{
910 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
911}
912
913#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
914
915/**
916 * Loads the shadow VMCS specified by the VMCS info. object.
917 *
918 * @returns VBox status code.
919 * @param pVmcsInfo The VMCS info. object.
920 *
921 * @remarks Can be called with interrupts disabled.
922 */
923static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
924{
925 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
926 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
927
928 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
929 if (RT_SUCCESS(rc))
930 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
931 return rc;
932}
933
934
935/**
936 * Clears the shadow VMCS specified by the VMCS info. object.
937 *
938 * @returns VBox status code.
939 * @param pVmcsInfo The VMCS info. object.
940 *
941 * @remarks Can be called with interrupts disabled.
942 */
943static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
944{
945 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
946 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
947
948 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
949 if (RT_SUCCESS(rc))
950 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
951 return rc;
952}
953
954
955/**
956 * Switches from and to the specified VMCSes.
957 *
958 * @returns VBox status code.
959 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
960 * @param pVmcsInfoTo The VMCS info. object we are switching to.
961 *
962 * @remarks Called with interrupts disabled.
963 */
964static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
965{
966 /*
967 * Clear the VMCS we are switching out if it has not already been cleared.
968 * This will sync any CPU internal data back to the VMCS.
969 */
970 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
971 {
972 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
973 if (RT_SUCCESS(rc))
974 {
975 /*
976 * The shadow VMCS, if any, would not be active at this point since we
977 * would have cleared it while importing the virtual hardware-virtualization
978 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
979 * clear the shadow VMCS here, just assert for safety.
980 */
981 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
982 }
983 else
984 return rc;
985 }
986
987 /*
988 * Clear the VMCS we are switching to if it has not already been cleared.
989 * This will initialize the VMCS launch state to "clear" required for loading it.
990 *
991 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
992 */
993 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
994 {
995 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
996 if (RT_SUCCESS(rc))
997 { /* likely */ }
998 else
999 return rc;
1000 }
1001
1002 /*
1003 * Finally, load the VMCS we are switching to.
1004 */
1005 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1006}
1007
1008
1009/**
1010 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1011 * caller.
1012 *
1013 * @returns VBox status code.
1014 * @param pVCpu The cross context virtual CPU structure.
1015 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1016 * true) or guest VMCS (pass false).
1017 */
1018static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1019{
1020 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1021 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1022
1023 PVMXVMCSINFO pVmcsInfoFrom;
1024 PVMXVMCSINFO pVmcsInfoTo;
1025 if (fSwitchToNstGstVmcs)
1026 {
1027 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1028 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1029 }
1030 else
1031 {
1032 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1033 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1034 }
1035
1036 /*
1037 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1038 * preemption hook code path acquires the current VMCS.
1039 */
1040 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1041
1042 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1043 if (RT_SUCCESS(rc))
1044 {
1045 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1046 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1047
1048 /*
1049 * If we are switching to a VMCS that was executed on a different host CPU or was
1050 * never executed before, flag that we need to export the host state before executing
1051 * guest/nested-guest code using hardware-assisted VMX.
1052 *
1053 * This could probably be done in a preemptible context since the preemption hook
1054 * will flag the necessary change in host context. However, since preemption is
1055 * already disabled and to avoid making assumptions about host specific code in
1056 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1057 * disabled.
1058 */
1059 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1060 { /* likely */ }
1061 else
1062 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1063
1064 ASMSetFlags(fEFlags);
1065
1066 /*
1067 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1068 * flag that we need to update the host MSR values there. Even if we decide in the
1069 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1070 * if its content differs, we would have to update the host MSRs anyway.
1071 */
1072 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1073 }
1074 else
1075 ASMSetFlags(fEFlags);
1076 return rc;
1077}
1078
1079#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1080#ifdef VBOX_STRICT
1081
1082/**
1083 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1084 * transient structure.
1085 *
1086 * @param pVCpu The cross context virtual CPU structure.
1087 * @param pVmxTransient The VMX-transient structure.
1088 */
1089DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1090{
1091 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1092 AssertRC(rc);
1093}
1094
1095
1096/**
1097 * Reads the VM-entry exception error code field from the VMCS into
1098 * the VMX transient structure.
1099 *
1100 * @param pVCpu The cross context virtual CPU structure.
1101 * @param pVmxTransient The VMX-transient structure.
1102 */
1103DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1104{
1105 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1106 AssertRC(rc);
1107}
1108
1109
1110/**
1111 * Reads the VM-entry instruction length field from the VMCS into
1112 * the VMX transient structure.
1113 *
1114 * @param pVCpu The cross context virtual CPU structure.
1115 * @param pVmxTransient The VMX-transient structure.
1116 */
1117DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1118{
1119 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1120 AssertRC(rc);
1121}
1122
1123#endif /* VBOX_STRICT */
1124
1125
1126/**
1127 * Reads VMCS fields into the VMXTRANSIENT structure, slow path version.
1128 *
1129 * Don't call directly unless it's likely that some or all of the fields
1130 * given in @a a_fReadMask have already been read.
1131 *
1132 * @tparam a_fReadMask The fields to read.
1133 * @param pVCpu The cross context virtual CPU structure.
1134 * @param pVmxTransient The VMX-transient structure.
1135 */
1136template<uint32_t const a_fReadMask>
1137static void vmxHCReadToTransientSlow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1138{
1139 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1140 | HMVMX_READ_EXIT_INSTR_LEN
1141 | HMVMX_READ_EXIT_INSTR_INFO
1142 | HMVMX_READ_IDT_VECTORING_INFO
1143 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1144 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1145 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1146 | HMVMX_READ_GUEST_LINEAR_ADDR
1147 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1148 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1149 )) == 0);
1150
1151 if ((pVmxTransient->fVmcsFieldsRead & a_fReadMask) != a_fReadMask)
1152 {
1153 uint32_t const fVmcsFieldsRead = pVmxTransient->fVmcsFieldsRead;
1154
1155 if ( (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1156 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1157 {
1158 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1159 AssertRC(rc);
1160 }
1161 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1162 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1163 {
1164 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1165 AssertRC(rc);
1166 }
1167 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1168 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1169 {
1170 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1171 AssertRC(rc);
1172 }
1173 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1174 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1175 {
1176 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1177 AssertRC(rc);
1178 }
1179 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1180 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1181 {
1182 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1183 AssertRC(rc);
1184 }
1185 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1186 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1187 {
1188 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1189 AssertRC(rc);
1190 }
1191 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1192 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1193 {
1194 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1195 AssertRC(rc);
1196 }
1197 if ( (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1198 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1199 {
1200 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1201 AssertRC(rc);
1202 }
1203 if ( (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1204 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1205 {
1206 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1207 AssertRC(rc);
1208 }
1209 if ( (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1210 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1211 {
1212 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1213 AssertRC(rc);
1214 }
1215
1216 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1217 }
1218}
1219
1220
1221/**
1222 * Reads VMCS fields into the VMXTRANSIENT structure.
1223 *
1224 * This optimizes for the case where none of @a a_fReadMask has been read yet,
1225 * generating an optimized read sequence without any conditionals in
1226 * non-strict builds.
1227 *
1228 * @tparam a_fReadMask The fields to read. One or more of the
1229 * HMVMX_READ_XXX fields ORed together.
1230 * @param pVCpu The cross context virtual CPU structure.
1231 * @param pVmxTransient The VMX-transient structure.
1232 */
1233template<uint32_t const a_fReadMask>
1234DECLINLINE(void) vmxHCReadToTransient(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1235{
1236 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1237 | HMVMX_READ_EXIT_INSTR_LEN
1238 | HMVMX_READ_EXIT_INSTR_INFO
1239 | HMVMX_READ_IDT_VECTORING_INFO
1240 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1241 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1242 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1243 | HMVMX_READ_GUEST_LINEAR_ADDR
1244 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1245 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1246 )) == 0);
1247
1248 if (RT_LIKELY(!(pVmxTransient->fVmcsFieldsRead & a_fReadMask)))
1249 {
1250 if (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1251 {
1252 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1253 AssertRC(rc);
1254 }
1255 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1256 {
1257 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1258 AssertRC(rc);
1259 }
1260 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1261 {
1262 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1263 AssertRC(rc);
1264 }
1265 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1266 {
1267 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1268 AssertRC(rc);
1269 }
1270 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1271 {
1272 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1273 AssertRC(rc);
1274 }
1275 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1276 {
1277 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1278 AssertRC(rc);
1279 }
1280 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1281 {
1282 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1283 AssertRC(rc);
1284 }
1285 if (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1286 {
1287 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1288 AssertRC(rc);
1289 }
1290 if (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1291 {
1292 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1293 AssertRC(rc);
1294 }
1295 if (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1296 {
1297 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1298 AssertRC(rc);
1299 }
1300
1301 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1302 }
1303 else
1304 {
1305 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatReadToTransientFallback);
1306 Log11Func(("a_fReadMask=%#x fVmcsFieldsRead=%#x => %#x - Taking inefficient code path!\n",
1307 a_fReadMask, pVmxTransient->fVmcsFieldsRead, a_fReadMask & pVmxTransient->fVmcsFieldsRead));
1308 vmxHCReadToTransientSlow<a_fReadMask>(pVCpu, pVmxTransient);
1309 }
1310}
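/*
 * Usage sketch: exit handlers pass the fields they need as a template argument so
 * the reads compile down to a straight-line sequence of VMREADs, e.g.:
 *
 *     vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
 *                          | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
 *
 * after which pVmxTransient->uExitQual and pVmxTransient->cbExitInstr are valid to
 * use (see HMVMX_ASSERT_READ above).
 */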
1311
1312
1313#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1314/**
1315 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1316 *
1317 * @param pVCpu The cross context virtual CPU structure.
1318 * @param pVmxTransient The VMX-transient structure.
1319 */
1320static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1321{
1322 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1323 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1324 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1325 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1326 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1327 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1328 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1329 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1330 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1331 AssertRC(rc);
1332 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1333 | HMVMX_READ_EXIT_INSTR_LEN
1334 | HMVMX_READ_EXIT_INSTR_INFO
1335 | HMVMX_READ_IDT_VECTORING_INFO
1336 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1337 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1338 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1339 | HMVMX_READ_GUEST_LINEAR_ADDR
1340 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1341}
1342#endif
1343
1344/**
1345 * Verifies that our cached values of the VMCS fields are all consistent with
1346 * what's actually present in the VMCS.
1347 *
1348 * @returns VBox status code.
1349 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1350 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1351 * VMCS content. HMCPU error-field is
1352 * updated, see VMX_VCI_XXX.
1353 * @param pVCpu The cross context virtual CPU structure.
1354 * @param pVmcsInfo The VMCS info. object.
1355 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1356 */
1357static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1358{
1359 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1360
1361 uint32_t u32Val;
1362 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1363 AssertRC(rc);
1364 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1365 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1366 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1367 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1368
1369 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1370 AssertRC(rc);
1371 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1372 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1373 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1374 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1375
1376 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1377 AssertRC(rc);
1378 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1379 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1380 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1381 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1382
1383 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1384 AssertRC(rc);
1385 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1386 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1387 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1388 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1389
1390 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1391 {
1392 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1393 AssertRC(rc);
1394 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1395 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1396 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1397 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1398 }
1399
1400 uint64_t u64Val;
1401 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1402 {
1403 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1404 AssertRC(rc);
1405 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1406 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1407 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1408 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1409 }
1410
1411 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1412 AssertRC(rc);
1413 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1414 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1415 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1416 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1417
1418 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1419 AssertRC(rc);
1420 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1421 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1422 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1423 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1424
1425 NOREF(pcszVmcs);
1426 return VINF_SUCCESS;
1427}
1428
1429
1430/**
1431 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1432 * VMCS.
1433 *
1434 * This is typically required when the guest changes paging mode.
1435 *
1436 * @returns VBox status code.
1437 * @param pVCpu The cross context virtual CPU structure.
1438 * @param pVmxTransient The VMX-transient structure.
1439 *
1440 * @remarks Requires EFER.
1441 * @remarks No-long-jump zone!!!
1442 */
1443static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1444{
1445 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1446 {
1447 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1448 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1449
1450 /*
1451 * VM-entry controls.
1452 */
1453 {
1454 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1455 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
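            /*
             * Quick sketch of the allowed0/allowed1 convention used below (illustrative, not
             * tied to any specific CPU): allowed0 bits are mandatory 1s, allowed1 bits are the
             * only ones that may be 1. So if we OR a feature such as VMX_ENTRY_CTLS_LOAD_EFER_MSR
             * into fVal while the CPU's allowed1 mask has that bit clear, the "(fVal & fZap) == fVal"
             * check further down fails and we return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO instead
             * of attempting the VM-entry.
             */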
1456
1457 /*
1458 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1459 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1460 *
1461 * For nested-guests, this is a mandatory VM-entry control. It's also
1462 * required because we do not want to leak host bits to the nested-guest.
1463 */
1464 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1465
1466 /*
1467 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1468 *
1469             * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1470 * required to get the nested-guest working with hardware-assisted VMX execution.
1471 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1472 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1473 * here rather than while merging the guest VMCS controls.
1474 */
1475 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1476 {
1477 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1478 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1479 }
1480 else
1481 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1482
1483 /*
1484 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1485 *
1486 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1487 * regardless of whether the nested-guest VMCS specifies it because we are free to
1488 * load whatever MSRs we require and we do not need to modify the guest visible copy
1489 * of the VM-entry MSR load area.
1490 */
1491 if ( g_fHmVmxSupportsVmcsEfer
1492#ifndef IN_NEM_DARWIN
1493 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1494#endif
1495 )
1496 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1497 else
1498 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1499
1500 /*
1501 * The following should -not- be set (since we're not in SMM mode):
1502 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1503 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1504 */
1505
1506 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1507 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1508
1509 if ((fVal & fZap) == fVal)
1510 { /* likely */ }
1511 else
1512 {
1513 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1514 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1515 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1516 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1517 }
1518
1519 /* Commit it to the VMCS. */
1520 if (pVmcsInfo->u32EntryCtls != fVal)
1521 {
1522 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1523 AssertRC(rc);
1524 pVmcsInfo->u32EntryCtls = fVal;
1525 }
1526 }
1527
1528 /*
1529 * VM-exit controls.
1530 */
1531 {
1532 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1533 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1534
1535 /*
1536 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1537 * supported the 1-setting of this bit.
1538 *
1539 * For nested-guests, we set the "save debug controls" as the converse
1540 * "load debug controls" is mandatory for nested-guests anyway.
1541 */
1542 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1543
1544 /*
1545 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1546 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1547 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1548 * vmxHCExportHostMsrs().
1549 *
1550 * For nested-guests, we always set this bit as we do not support 32-bit
1551 * hosts.
1552 */
1553 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1554
1555#ifndef IN_NEM_DARWIN
1556 /*
1557 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1558 *
1559 * For nested-guests, we should use the "save IA32_EFER" control if we also
1560 * used the "load IA32_EFER" control while exporting VM-entry controls.
1561 */
1562 if ( g_fHmVmxSupportsVmcsEfer
1563 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1564 {
1565 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1566 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1567 }
1568#endif
1569
1570 /*
1571 * Enable saving of the VMX-preemption timer value on VM-exit.
1572 * For nested-guests, currently not exposed/used.
1573 */
1574 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1575 * the timer value. */
1576 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1577 {
1578 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1579 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1580 }
1581
1582 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1583 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1584
1585 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1586 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1587 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1588
1589 if ((fVal & fZap) == fVal)
1590 { /* likely */ }
1591 else
1592 {
1593 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1594 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1595 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1596 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1597 }
1598
1599 /* Commit it to the VMCS. */
1600 if (pVmcsInfo->u32ExitCtls != fVal)
1601 {
1602 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1603 AssertRC(rc);
1604 pVmcsInfo->u32ExitCtls = fVal;
1605 }
1606 }
1607
1608 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1609 }
1610 return VINF_SUCCESS;
1611}
1612
1613
1614/**
1615 * Sets the TPR threshold in the VMCS.
1616 *
1617 * @param pVCpu The cross context virtual CPU structure.
1618 * @param pVmcsInfo The VMCS info. object.
1619 * @param u32TprThreshold The TPR threshold (task-priority class only).
1620 */
1621DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1622{
1623 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1624 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1625 RT_NOREF(pVmcsInfo);
1626 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1627 AssertRC(rc);
1628}
1629
1630
1631/**
1632 * Exports the guest APIC TPR state into the VMCS.
1633 *
1634 * @param pVCpu The cross context virtual CPU structure.
1635 * @param pVmxTransient The VMX-transient structure.
1636 *
1637 * @remarks No-long-jump zone!!!
1638 */
1639static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1640{
1641 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1642 {
1643 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1644
1645 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1646 if (!pVmxTransient->fIsNestedGuest)
1647 {
1648 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1649 && APICIsEnabled(pVCpu))
1650 {
1651 /*
1652 * Setup TPR shadowing.
1653 */
1654 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1655 {
1656 bool fPendingIntr = false;
1657 uint8_t u8Tpr = 0;
1658 uint8_t u8PendingIntr = 0;
1659 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1660 AssertRC(rc);
1661
1662 /*
1663 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1664 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1665 * priority of the pending interrupt so we can deliver the interrupt. If there
1666 * are no interrupts pending, set threshold to 0 to not cause any
1667 * TPR-below-threshold VM-exits.
1668 */
1669 uint32_t u32TprThreshold = 0;
1670 if (fPendingIntr)
1671 {
1672 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1673 (which is the Task-Priority Class). */
1674 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1675 const uint8_t u8TprPriority = u8Tpr >> 4;
1676 if (u8PendingPriority <= u8TprPriority)
1677 u32TprThreshold = u8PendingPriority;
1678 }
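                    /*
                     * Worked example (numbers purely illustrative): pending vector 0x51 has
                     * priority class 5 while a guest TPR of 0x80 is class 8, so the interrupt is
                     * masked and we program a threshold of 5. Once the guest lowers its TPR to
                     * class 4 or below, VT-x raises a TPR-below-threshold VM-exit and we get to
                     * inject vector 0x51. If the pending class were above the TPR class, the
                     * interrupt would already be deliverable and the threshold stays 0.
                     */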
1679
1680 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1681 }
1682 }
1683 }
1684 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1685 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1686 }
1687}
1688
1689
1690/**
1691 * Gets the guest interruptibility-state and updates related force-flags.
1692 *
1693 * @returns Guest's interruptibility-state.
1694 * @param pVCpu The cross context virtual CPU structure.
1695 *
1696 * @remarks No-long-jump zone!!!
1697 */
1698static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1699{
1700 /*
1701 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1702 */
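    /*
     * For instance (illustrative): the guest executes STI followed by HLT. If we take a
     * VM-exit right after the STI, RIP equals the PC recorded by EM for the inhibition and
     * IF is now set, so we report block-by-STI and VT-x will not inject an interrupt ahead
     * of the HLT. Once RIP has moved past the recorded PC, the force-flag is simply cleared
     * below.
     */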
1703 uint32_t fIntrState = 0;
1704 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1705 {
1706 /* If inhibition is active, RIP and RFLAGS should've been imported from the VMCS already. */
1707 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
1708
1709 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1710 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
1711 {
1712 if (pCtx->eflags.Bits.u1IF)
1713 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1714 else
1715 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1716 }
1717 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1718 {
1719 /*
1720 * We can clear the inhibit force flag as even if we go back to the recompiler
1721 * without executing guest code in VT-x, the flag's condition to be cleared is
1722 * met and thus the cleared state is correct.
1723 */
1724 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1725 }
1726 }
1727
1728 /*
1729 * Check if we should inhibit NMI delivery.
1730 */
1731 if (CPUMIsGuestNmiBlocking(pVCpu))
1732 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1733
1734 /*
1735 * Validate.
1736 */
1737#ifdef VBOX_STRICT
1738    /* We don't support block-by-SMI yet. */
1739 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1740
1741 /* Block-by-STI must not be set when interrupts are disabled. */
1742 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
1743 {
1744 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1745 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
1746 }
1747#endif
1748
1749 return fIntrState;
1750}
1751
1752
1753/**
1754 * Exports the exception intercepts required for guest execution in the VMCS.
1755 *
1756 * @param pVCpu The cross context virtual CPU structure.
1757 * @param pVmxTransient The VMX-transient structure.
1758 *
1759 * @remarks No-long-jump zone!!!
1760 */
1761static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1762{
1763 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1764 {
1765 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1766 if ( !pVmxTransient->fIsNestedGuest
1767 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1768 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1769 else
1770 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1771
1772 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1773 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1774 }
1775}
1776
1777
1778/**
1779 * Exports the guest's RIP into the guest-state area in the VMCS.
1780 *
1781 * @param pVCpu The cross context virtual CPU structure.
1782 *
1783 * @remarks No-long-jump zone!!!
1784 */
1785static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1786{
1787 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1788 {
1789 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1790
1791 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1792 AssertRC(rc);
1793
1794 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1795 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1796 }
1797}
1798
1799
1800/**
1801 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1802 *
1803 * @param pVCpu The cross context virtual CPU structure.
1804 * @param pVmxTransient The VMX-transient structure.
1805 *
1806 * @remarks No-long-jump zone!!!
1807 */
1808static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1809{
1810 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1811 {
1812 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1813
1814 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
1815 Let us assert it as such and use 32-bit VMWRITE. */
1816 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
1817 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
1818 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
1819 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
1820
1821#ifndef IN_NEM_DARWIN
1822 /*
1823 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1824 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1825 * can run the real-mode guest code under Virtual 8086 mode.
1826 */
1827 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1828 if (pVmcsInfo->RealMode.fRealOnV86Active)
1829 {
1830 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1831 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1832 Assert(!pVmxTransient->fIsNestedGuest);
1833 pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
1834 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
1835 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1836 }
1837#else
1838 RT_NOREF(pVmxTransient);
1839#endif
1840
1841 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
1842 AssertRC(rc);
1843
1844 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1845 Log4Func(("eflags=%#RX32\n", fEFlags.u32));
1846 }
1847}
1848
1849
1850#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1851/**
1852 * Copies the nested-guest VMCS to the shadow VMCS.
1853 *
1854 * @returns VBox status code.
1855 * @param pVCpu The cross context virtual CPU structure.
1856 * @param pVmcsInfo The VMCS info. object.
1857 *
1858 * @remarks No-long-jump zone!!!
1859 */
1860static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1861{
1862 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1863 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1864
1865 /*
1866 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1867 * current VMCS, as we may try saving guest lazy MSRs.
1868 *
1869 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1870 * calling the import VMCS code which is currently performing the guest MSR reads
1871 * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
1872 * and the rest of the VMX leave session machinery.
1873 */
1874 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1875
1876 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1877 if (RT_SUCCESS(rc))
1878 {
1879 /*
1880 * Copy all guest read/write VMCS fields.
1881 *
1882 * We don't check for VMWRITE failures here for performance reasons and
1883 * because they are not expected to fail, barring irrecoverable conditions
1884 * like hardware errors.
1885 */
1886 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1887 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1888 {
1889 uint64_t u64Val;
1890 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1891 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1892 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1893 }
1894
1895 /*
1896 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1897 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1898 */
1899 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1900 {
1901 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1902 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1903 {
1904 uint64_t u64Val;
1905 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1906 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1907 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1908 }
1909 }
1910
1911 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1912 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1913 }
1914
1915 ASMSetFlags(fEFlags);
1916 return rc;
1917}
1918
1919
1920/**
1921 * Copies the shadow VMCS to the nested-guest VMCS.
1922 *
1923 * @returns VBox status code.
1924 * @param pVCpu The cross context virtual CPU structure.
1925 * @param pVmcsInfo The VMCS info. object.
1926 *
1927 * @remarks Called with interrupts disabled.
1928 */
1929static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1930{
1931 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1932 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1933 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1934
1935 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1936 if (RT_SUCCESS(rc))
1937 {
1938 /*
1939 * Copy guest read/write fields from the shadow VMCS.
1940 * Guest read-only fields cannot be modified, so no need to copy them.
1941 *
1942 * We don't check for VMREAD failures here for performance reasons and
1943 * because they are not expected to fail, barring irrecoverable conditions
1944 * like hardware errors.
1945 */
1946 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1947 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1948 {
1949 uint64_t u64Val;
1950 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1951 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1952 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1953 }
1954
1955 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1956 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1957 }
1958 return rc;
1959}
1960
1961
1962/**
1963 * Enables VMCS shadowing for the given VMCS info. object.
1964 *
1965 * @param pVCpu The cross context virtual CPU structure.
1966 * @param pVmcsInfo The VMCS info. object.
1967 *
1968 * @remarks No-long-jump zone!!!
1969 */
1970static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1971{
1972 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1973 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1974 {
1975 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1976 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1977 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1978 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1979 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1980 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
1981 Log4Func(("Enabled\n"));
1982 }
1983}
1984
1985
1986/**
1987 * Disables VMCS shadowing for the given VMCS info. object.
1988 *
1989 * @param pVCpu The cross context virtual CPU structure.
1990 * @param pVmcsInfo The VMCS info. object.
1991 *
1992 * @remarks No-long-jump zone!!!
1993 */
1994static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1995{
1996 /*
1997 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
1998 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
1999 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
2000 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
2001 *
2002 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2003 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2004 */
2005 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2006 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2007 {
2008 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2009 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2010 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2011 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2012 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2013 Log4Func(("Disabled\n"));
2014 }
2015}
2016#endif
2017
2018
2019/**
2020 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2021 *
2022 * The guest FPU state is always pre-loaded, hence we don't need to bother with
2023 * sharing FPU-related CR0 bits between the guest and host.
2024 *
2025 * @returns VBox status code.
2026 * @param pVCpu The cross context virtual CPU structure.
2027 * @param pVmxTransient The VMX-transient structure.
2028 *
2029 * @remarks No-long-jump zone!!!
2030 */
2031static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2032{
2033 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2034 {
2035 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2036 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2037
2038 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2039 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2040 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2041 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2042 else
2043 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
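        /*
         * Typical values for illustration only (they are read from the fixed-bit MSRs above,
         * not hardcoded): CR0 fixed0 is often 0x80000021 (PG, NE and PE must be 1) and fixed1
         * is often 0xffffffff (no bit is forced to 0). With unrestricted guest execution we
         * drop PE and PG from fSetCr0, leaving just NE (0x20) as the mandatory bit.
         */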
2044
2045 if (!pVmxTransient->fIsNestedGuest)
2046 {
2047 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2048 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2049 uint64_t const u64ShadowCr0 = u64GuestCr0;
2050 Assert(!RT_HI_U32(u64GuestCr0));
2051
2052 /*
2053 * Setup VT-x's view of the guest CR0.
2054 */
2055 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2056 if (VM_IS_VMX_NESTED_PAGING(pVM))
2057 {
2058#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2059 if (CPUMIsGuestPagingEnabled(pVCpu))
2060 {
2061 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2062 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2063 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2064 }
2065 else
2066 {
2067 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2068 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2069 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2070 }
2071
2072 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2073 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2074 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2075#endif
2076 }
2077 else
2078 {
2079 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2080 u64GuestCr0 |= X86_CR0_WP;
2081 }
2082
2083 /*
2084 * Guest FPU bits.
2085 *
2086     * Since we always pre-load the guest FPU before VM-entry, there is no need to track lazy state
2087 * using CR0.TS.
2088 *
2089     * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2090     * set on the first CPUs to support VT-x; nothing is said about it with regards to UX in the VM-entry checks.
2091 */
2092 u64GuestCr0 |= X86_CR0_NE;
2093
2094 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2095 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2096
2097 /*
2098 * Update exception intercepts.
2099 */
2100 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2101#ifndef IN_NEM_DARWIN
2102 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2103 {
2104 Assert(PDMVmmDevHeapIsEnabled(pVM));
2105 Assert(pVM->hm.s.vmx.pRealModeTSS);
2106 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2107 }
2108 else
2109#endif
2110 {
2111 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2112 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2113 if (fInterceptMF)
2114 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2115 }
2116
2117 /* Additional intercepts for debugging, define these yourself explicitly. */
2118#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2119 uXcptBitmap |= 0
2120 | RT_BIT(X86_XCPT_BP)
2121 | RT_BIT(X86_XCPT_DE)
2122 | RT_BIT(X86_XCPT_NM)
2123 | RT_BIT(X86_XCPT_TS)
2124 | RT_BIT(X86_XCPT_UD)
2125 | RT_BIT(X86_XCPT_NP)
2126 | RT_BIT(X86_XCPT_SS)
2127 | RT_BIT(X86_XCPT_GP)
2128 | RT_BIT(X86_XCPT_PF)
2129 | RT_BIT(X86_XCPT_MF)
2130 ;
2131#elif defined(HMVMX_ALWAYS_TRAP_PF)
2132 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2133#endif
2134 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2135 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2136 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2137 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2138 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2139
2140 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2141 u64GuestCr0 |= fSetCr0;
2142 u64GuestCr0 &= fZapCr0;
2143 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2144
2145 /* Commit the CR0 and related fields to the guest VMCS. */
2146 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2147 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2148 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2149 {
2150 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2151 AssertRC(rc);
2152 }
2153 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2154 {
2155 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2156 AssertRC(rc);
2157 }
2158
2159 /* Update our caches. */
2160 pVmcsInfo->u32ProcCtls = uProcCtls;
2161 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2162
2163 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2164 }
2165 else
2166 {
2167 /*
2168 * With nested-guests, we may have extended the guest/host mask here since we
2169 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2170 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2171 * originally supplied. We must copy those bits from the nested-guest CR0 into
2172 * the nested-guest CR0 read-shadow.
2173 */
2174 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2175 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2176 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2177 Assert(!RT_HI_U32(u64GuestCr0));
2178 Assert(u64GuestCr0 & X86_CR0_NE);
2179
2180 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2181 u64GuestCr0 |= fSetCr0;
2182 u64GuestCr0 &= fZapCr0;
2183 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2184
2185 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2186 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2187 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2188
2189 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2190 }
2191
2192 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2193 }
2194
2195 return VINF_SUCCESS;
2196}
2197
2198
2199/**
2200 * Exports the guest control registers (CR3, CR4) into the guest-state area
2201 * in the VMCS.
2202 *
2203 * @returns VBox strict status code.
2204 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2205 * without unrestricted guest access and the VMMDev is not presently
2206 * mapped (e.g. EFI32).
2207 *
2208 * @param pVCpu The cross context virtual CPU structure.
2209 * @param pVmxTransient The VMX-transient structure.
2210 *
2211 * @remarks No-long-jump zone!!!
2212 */
2213static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2214{
2215 int rc = VINF_SUCCESS;
2216 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2217
2218 /*
2219 * Guest CR2.
2220 * It's always loaded in the assembler code. Nothing to do here.
2221 */
2222
2223 /*
2224 * Guest CR3.
2225 */
2226 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2227 {
2228 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2229
2230 if (VM_IS_VMX_NESTED_PAGING(pVM))
2231 {
2232#ifndef IN_NEM_DARWIN
2233 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2234 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2235
2236 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2237 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2238 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2239 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2240
2241 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2242 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2243 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
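            /*
             * EPTP layout recap (Intel SDM): bits 2:0 hold the memory type (6 = WB), bits 5:3
             * hold the page-walk length minus 1 (3 for a 4-level walk), bit 6 enables the EPT
             * accessed/dirty flags, and bits 12 and up hold the physical address of the EPT
             * PML4 table. Hence the low bits written here end up as 0x1e (WB + 4-level walk).
             */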
2244
2245 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2246            AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3  /* Bits 5:3 (EPT page-walk length - 1) must be 3. */
2247                      && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 11:7 MBZ. */
2248 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2249 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2250 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2251 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2252
2253 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2254 AssertRC(rc);
2255#endif
2256
2257 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2258 uint64_t u64GuestCr3 = pCtx->cr3;
2259 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2260 || CPUMIsGuestPagingEnabledEx(pCtx))
2261 {
2262 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2263 if (CPUMIsGuestInPAEModeEx(pCtx))
2264 {
2265 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2266 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2267 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2268 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2269 }
2270
2271 /*
2272                 * With nested paging, the guest's view of its CR3 is left untouched when the
2273                 * guest is using paging, or when we have unrestricted guest execution to handle
2274                 * the guest while it's not using paging.
2275 */
2276 }
2277#ifndef IN_NEM_DARWIN
2278 else
2279 {
2280 /*
2281 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2282 * thinks it accesses physical memory directly, we use our identity-mapped
2283 * page table to map guest-linear to guest-physical addresses. EPT takes care
2284 * of translating it to host-physical addresses.
2285 */
2286 RTGCPHYS GCPhys;
2287 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2288
2289 /* We obtain it here every time as the guest could have relocated this PCI region. */
2290 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2291 if (RT_SUCCESS(rc))
2292 { /* likely */ }
2293 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2294 {
2295 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2296 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2297 }
2298 else
2299 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2300
2301 u64GuestCr3 = GCPhys;
2302 }
2303#endif
2304
2305 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2306 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2307 AssertRC(rc);
2308 }
2309 else
2310 {
2311 Assert(!pVmxTransient->fIsNestedGuest);
2312 /* Non-nested paging case, just use the hypervisor's CR3. */
2313 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2314
2315 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2316 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2317 AssertRC(rc);
2318 }
2319
2320 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2321 }
2322
2323 /*
2324 * Guest CR4.
2325     * ASSUMES this is done every time we get in from ring-3! (XCR0)
2326 */
2327 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2328 {
2329 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2330 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2331
2332 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2333 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2334
2335 /*
2336 * With nested-guests, we may have extended the guest/host mask here (since we
2337 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
2338 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2339 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2340 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2341 */
2342 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2343 uint64_t u64GuestCr4 = pCtx->cr4;
2344 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2345 ? pCtx->cr4
2346 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2347 Assert(!RT_HI_U32(u64GuestCr4));
2348
2349#ifndef IN_NEM_DARWIN
2350 /*
2351 * Setup VT-x's view of the guest CR4.
2352 *
2353 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2354 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2355 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2356 *
2357 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2358 */
2359 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2360 {
2361 Assert(pVM->hm.s.vmx.pRealModeTSS);
2362 Assert(PDMVmmDevHeapIsEnabled(pVM));
2363 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2364 }
2365#endif
2366
2367 if (VM_IS_VMX_NESTED_PAGING(pVM))
2368 {
2369 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2370 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2371 {
2372 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2373 u64GuestCr4 |= X86_CR4_PSE;
2374 /* Our identity mapping is a 32-bit page directory. */
2375 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2376 }
2377 /* else use guest CR4.*/
2378 }
2379 else
2380 {
2381 Assert(!pVmxTransient->fIsNestedGuest);
2382
2383 /*
2384             * The shadow paging mode and the guest paging mode can differ; the shadow is in accordance with the host
2385             * paging mode, and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2386 */
2387 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2388 {
2389 case PGMMODE_REAL: /* Real-mode. */
2390 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2391 case PGMMODE_32_BIT: /* 32-bit paging. */
2392 {
2393 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2394 break;
2395 }
2396
2397 case PGMMODE_PAE: /* PAE paging. */
2398 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2399 {
2400 u64GuestCr4 |= X86_CR4_PAE;
2401 break;
2402 }
2403
2404 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2405 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2406 {
2407#ifdef VBOX_WITH_64_BITS_GUESTS
2408 /* For our assumption in vmxHCShouldSwapEferMsr. */
2409 Assert(u64GuestCr4 & X86_CR4_PAE);
2410 break;
2411#endif
2412 }
2413 default:
2414 AssertFailed();
2415 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2416 }
2417 }
2418
2419 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2420 u64GuestCr4 |= fSetCr4;
2421 u64GuestCr4 &= fZapCr4;
2422
2423 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2424 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2425 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2426
2427#ifndef IN_NEM_DARWIN
2428 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2429 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2430 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2431 {
2432 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2433 hmR0VmxUpdateStartVmFunction(pVCpu);
2434 }
2435#endif
2436
2437 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2438
2439 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2440 }
2441 return rc;
2442}
2443
2444
2445#ifdef VBOX_STRICT
2446/**
2447 * Strict function to validate segment registers.
2448 *
2449 * @param pVCpu The cross context virtual CPU structure.
2450 * @param pVmcsInfo The VMCS info. object.
2451 *
2452 * @remarks Will import guest CR0 on strict builds during validation of
2453 * segments.
2454 */
2455static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2456{
2457 /*
2458 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2459 *
2460 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2461 * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2462 * unusable bit and doesn't change the guest-context value.
2463 */
2464 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2465 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2466 vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2467 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2468 && ( !CPUMIsGuestInRealModeEx(pCtx)
2469 && !CPUMIsGuestInV86ModeEx(pCtx)))
2470 {
2471 /* Protected mode checks */
2472 /* CS */
2473 Assert(pCtx->cs.Attr.n.u1Present);
2474 Assert(!(pCtx->cs.Attr.u & 0xf00));
2475 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2476 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2477 || !(pCtx->cs.Attr.n.u1Granularity));
2478 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2479 || (pCtx->cs.Attr.n.u1Granularity));
2480 /* CS cannot be loaded with NULL in protected mode. */
2481 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2482 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2483 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2484 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2485 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2486 else
2487            AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2488 /* SS */
2489 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2490 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2491 if ( !(pCtx->cr0 & X86_CR0_PE)
2492 || pCtx->cs.Attr.n.u4Type == 3)
2493 {
2494 Assert(!pCtx->ss.Attr.n.u2Dpl);
2495 }
2496 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2497 {
2498 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2499 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2500 Assert(pCtx->ss.Attr.n.u1Present);
2501 Assert(!(pCtx->ss.Attr.u & 0xf00));
2502 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2503 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2504 || !(pCtx->ss.Attr.n.u1Granularity));
2505 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2506 || (pCtx->ss.Attr.n.u1Granularity));
2507 }
2508 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2509 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2510 {
2511 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2512 Assert(pCtx->ds.Attr.n.u1Present);
2513 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2514 Assert(!(pCtx->ds.Attr.u & 0xf00));
2515 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2516 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2517 || !(pCtx->ds.Attr.n.u1Granularity));
2518 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2519 || (pCtx->ds.Attr.n.u1Granularity));
2520 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2521 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2522 }
2523 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2524 {
2525 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2526 Assert(pCtx->es.Attr.n.u1Present);
2527 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2528 Assert(!(pCtx->es.Attr.u & 0xf00));
2529 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2530 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2531 || !(pCtx->es.Attr.n.u1Granularity));
2532 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2533 || (pCtx->es.Attr.n.u1Granularity));
2534 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2535 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2536 }
2537 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2538 {
2539 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2540 Assert(pCtx->fs.Attr.n.u1Present);
2541 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2542 Assert(!(pCtx->fs.Attr.u & 0xf00));
2543 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2544 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2545 || !(pCtx->fs.Attr.n.u1Granularity));
2546 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2547 || (pCtx->fs.Attr.n.u1Granularity));
2548 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2549 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2550 }
2551 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2552 {
2553 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2554 Assert(pCtx->gs.Attr.n.u1Present);
2555 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2556 Assert(!(pCtx->gs.Attr.u & 0xf00));
2557 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2558 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2559 || !(pCtx->gs.Attr.n.u1Granularity));
2560 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2561 || (pCtx->gs.Attr.n.u1Granularity));
2562 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2563 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2564 }
2565 /* 64-bit capable CPUs. */
2566 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2567 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2568 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2569 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2570 }
2571 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2572 || ( CPUMIsGuestInRealModeEx(pCtx)
2573 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2574 {
2575 /* Real and v86 mode checks. */
2576        /* vmxHCExportGuestSegReg() writes the modified value into the VMCS. We want what we're actually feeding to VT-x. */
2577 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2578#ifndef IN_NEM_DARWIN
2579 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2580 {
2581 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2582 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2583 }
2584 else
2585#endif
2586 {
2587 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2588 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2589 }
2590
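        /* Quick sanity example for the real/v86 mode checks below: a selector of 0x1234 must
           yield a base of 0x12340 (Sel << 4), a limit of 0xffff and attributes of 0xf3. */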
2591 /* CS */
2592        AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#RX64 sel %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2593 Assert(pCtx->cs.u32Limit == 0xffff);
2594 Assert(u32CSAttr == 0xf3);
2595 /* SS */
2596 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2597 Assert(pCtx->ss.u32Limit == 0xffff);
2598 Assert(u32SSAttr == 0xf3);
2599 /* DS */
2600 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2601 Assert(pCtx->ds.u32Limit == 0xffff);
2602 Assert(u32DSAttr == 0xf3);
2603 /* ES */
2604 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2605 Assert(pCtx->es.u32Limit == 0xffff);
2606 Assert(u32ESAttr == 0xf3);
2607 /* FS */
2608 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2609 Assert(pCtx->fs.u32Limit == 0xffff);
2610 Assert(u32FSAttr == 0xf3);
2611 /* GS */
2612 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2613 Assert(pCtx->gs.u32Limit == 0xffff);
2614 Assert(u32GSAttr == 0xf3);
2615 /* 64-bit capable CPUs. */
2616 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2617 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2618 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2619 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2620 }
2621}
2622#endif /* VBOX_STRICT */
2623
2624
2625/**
2626 * Exports a guest segment register into the guest-state area in the VMCS.
2627 *
2628 * @returns VBox status code.
2629 * @param pVCpu The cross context virtual CPU structure.
2630 * @param pVmcsInfo The VMCS info. object.
2631 * @param iSegReg The segment register number (X86_SREG_XXX).
2632 * @param pSelReg Pointer to the segment selector.
2633 *
2634 * @remarks No-long-jump zone!!!
2635 */
2636static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2637{
2638 Assert(iSegReg < X86_SREG_COUNT);
2639
2640 uint32_t u32Access = pSelReg->Attr.u;
2641#ifndef IN_NEM_DARWIN
2642 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2643#endif
2644 {
2645 /*
2646 * The way to differentiate between whether this is really a null selector or was just
2647 * a selector loaded with 0 in real-mode is using the segment attributes. A selector
2648 * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
2649         * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure
2650         * that NULL selectors loaded in protected-mode have their attributes set to 0.
2651 */
2652 if (u32Access)
2653 { }
2654 else
2655 u32Access = X86DESCATTR_UNUSABLE;
2656 }
2657#ifndef IN_NEM_DARWIN
2658 else
2659 {
2660 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2661 u32Access = 0xf3;
2662 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2663 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2664 RT_NOREF_PV(pVCpu);
2665 }
2666#else
2667 RT_NOREF(pVmcsInfo);
2668#endif
2669
2670 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2671 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2672              ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2673
2674 /*
2675 * Commit it to the VMCS.
2676 */
2677 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2678 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2679 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2680 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2681 return VINF_SUCCESS;
2682}
2683
2684
2685/**
2686 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2687 * area in the VMCS.
2688 *
2689 * @returns VBox status code.
2690 * @param pVCpu The cross context virtual CPU structure.
2691 * @param pVmxTransient The VMX-transient structure.
2692 *
2693 * @remarks Will import guest CR0 on strict builds during validation of
2694 * segments.
2695 * @remarks No-long-jump zone!!!
2696 */
2697static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2698{
2699 int rc = VERR_INTERNAL_ERROR_5;
2700#ifndef IN_NEM_DARWIN
2701 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2702#endif
2703 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2704 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2705#ifndef IN_NEM_DARWIN
2706 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2707#endif
2708
2709 /*
2710 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2711 */
2712 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2713 {
2714 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2715 {
2716 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2717#ifndef IN_NEM_DARWIN
2718 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2719 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2720#endif
2721 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2722 AssertRC(rc);
2723 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2724 }
2725
2726 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2727 {
2728 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2729#ifndef IN_NEM_DARWIN
2730 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2731 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2732#endif
2733 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2734 AssertRC(rc);
2735 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2736 }
2737
2738 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2739 {
2740 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2741#ifndef IN_NEM_DARWIN
2742 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2743 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2744#endif
2745 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2746 AssertRC(rc);
2747 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2748 }
2749
2750 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2751 {
2752 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2753#ifndef IN_NEM_DARWIN
2754 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2755 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2756#endif
2757 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2758 AssertRC(rc);
2759 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2760 }
2761
2762 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2763 {
2764 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2765#ifndef IN_NEM_DARWIN
2766 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2767 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2768#endif
2769 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2770 AssertRC(rc);
2771 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2772 }
2773
2774 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2775 {
2776 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2777#ifndef IN_NEM_DARWIN
2778 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2779 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2780#endif
2781 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2782 AssertRC(rc);
2783 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2784 }
2785
2786#ifdef VBOX_STRICT
2787 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2788#endif
2789 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2790 pCtx->cs.Attr.u));
2791 }
2792
2793 /*
2794 * Guest TR.
2795 */
2796 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2797 {
2798 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2799
2800 /*
2801 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2802 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2803 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2804 */
2805 uint16_t u16Sel;
2806 uint32_t u32Limit;
2807 uint64_t u64Base;
2808 uint32_t u32AccessRights;
2809#ifndef IN_NEM_DARWIN
2810 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2811#endif
2812 {
2813 u16Sel = pCtx->tr.Sel;
2814 u32Limit = pCtx->tr.u32Limit;
2815 u64Base = pCtx->tr.u64Base;
2816 u32AccessRights = pCtx->tr.Attr.u;
2817 }
2818#ifndef IN_NEM_DARWIN
2819 else
2820 {
2821 Assert(!pVmxTransient->fIsNestedGuest);
2822 Assert(pVM->hm.s.vmx.pRealModeTSS);
2823 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2824
2825 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2826 RTGCPHYS GCPhys;
2827 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2828 AssertRCReturn(rc, rc);
2829
2830 X86DESCATTR DescAttr;
2831 DescAttr.u = 0;
2832 DescAttr.n.u1Present = 1;
2833 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2834
2835 u16Sel = 0;
2836 u32Limit = HM_VTX_TSS_SIZE;
2837 u64Base = GCPhys;
2838 u32AccessRights = DescAttr.u;
2839 }
2840#endif
2841
2842 /* Validate. */
2843 Assert(!(u16Sel & RT_BIT(2)));
2844 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2845 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2846 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2847        Assert(!(u32AccessRights & RT_BIT(4)));                 /* System MBZ. */
2848        Assert(u32AccessRights & RT_BIT(7));                    /* Present MB1. */
2849 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2850 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2851 Assert( (u32Limit & 0xfff) == 0xfff
2852 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2853 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2854 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2855
2856 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2857 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2858 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2859 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2860
2861 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2862 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2863 }
2864
2865 /*
2866 * Guest GDTR.
2867 */
2868 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2869 {
2870 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2871
2872 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2873 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2874
2875 /* Validate. */
2876 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2877
2878 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2879 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2880 }
2881
2882 /*
2883 * Guest LDTR.
2884 */
2885 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2886 {
2887 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2888
2889        /* The unusable bit is specific to VT-x; if it's a null selector, mark it as an unusable segment. */
2890 uint32_t u32Access;
2891 if ( !pVmxTransient->fIsNestedGuest
2892 && !pCtx->ldtr.Attr.u)
2893 u32Access = X86DESCATTR_UNUSABLE;
2894 else
2895 u32Access = pCtx->ldtr.Attr.u;
2896
2897 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2898 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2899 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2900 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2901
2902 /* Validate. */
2903 if (!(u32Access & X86DESCATTR_UNUSABLE))
2904 {
2905 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2906 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2907 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2908 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2909 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2910 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2911 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2912 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2913 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2914 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2915 }
2916
2917 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2918 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2919 }
2920
2921 /*
2922 * Guest IDTR.
2923 */
2924 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2925 {
2926 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2927
2928 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2929 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2930
2931 /* Validate. */
2932 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2933
2934 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2935 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2936 }
2937
2938 return VINF_SUCCESS;
2939}
2940
2941
2942/**
2943 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2944 * VM-exit interruption info type.
2945 *
2946 * @returns The IEM exception flags.
2947 * @param uVector The event vector.
2948 * @param uVmxEventType The VMX event type.
2949 *
2950 * @remarks This function currently only constructs flags required for
2951 *          IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2952 * and CR2 aspects of an exception are not included).
2953 */
2954static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2955{
2956 uint32_t fIemXcptFlags;
2957 switch (uVmxEventType)
2958 {
2959 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2960 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2961 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2962 break;
2963
2964 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2965 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2966 break;
2967
2968 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2969 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2970 break;
2971
2972 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2973 {
2974 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2975 if (uVector == X86_XCPT_BP)
2976 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2977 else if (uVector == X86_XCPT_OF)
2978 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
2979 else
2980 {
2981 fIemXcptFlags = 0;
2982 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
2983 }
2984 break;
2985 }
2986
2987 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
2988 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2989 break;
2990
2991 default:
2992 fIemXcptFlags = 0;
2993 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
2994 break;
2995 }
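    /* Example, taken straight from the mapping above: a software breakpoint recorded in the
       IDT-vectoring / exit-interruption info (uVector = X86_XCPT_BP, type SW_XCPT) yields
       IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR. */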
2996 return fIemXcptFlags;
2997}
2998
2999
3000/**
3001 * Sets an event as a pending event to be injected into the guest.
3002 *
3003 * @param pVCpu The cross context virtual CPU structure.
3004 * @param u32IntInfo The VM-entry interruption-information field.
3005 * @param cbInstr The VM-entry instruction length in bytes (for
3006 * software interrupts, exceptions and privileged
3007 * software exceptions).
3008 * @param u32ErrCode The VM-entry exception error code.
3009 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3010 * page-fault.
3011 */
3012DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3013 RTGCUINTPTR GCPtrFaultAddress)
3014{
3015 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3016 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3017 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3018 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3019 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3020 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3021}
3022
3023
3024/**
3025 * Sets an external interrupt as pending-for-injection into the VM.
3026 *
3027 * @param pVCpu The cross context virtual CPU structure.
3028 * @param u8Interrupt The external interrupt vector.
3029 */
3030DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3031{
3032 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3033 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3034 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3035 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
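    /* For reference: the interruption-information format packs the vector into bits 7:0, the
       event type into bits 10:8, the error-code-valid flag into bit 11 and the valid flag into
       bit 31.  E.g. an external interrupt with vector 0x20 gives u32IntInfo = 0x80000020
       (type 0, no error code, valid bit set). */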
3036 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3037}
3038
3039
3040/**
3041 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3042 *
3043 * @param pVCpu The cross context virtual CPU structure.
3044 */
3045DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3046{
3047 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3048 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3049 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3050 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3051 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3052}
3053
3054
3055/**
3056 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3057 *
3058 * @param pVCpu The cross context virtual CPU structure.
3059 */
3060DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3061{
3062 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3063 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3064 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3065 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3066 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3067}
3068
3069
3070/**
3071 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3072 *
3073 * @param pVCpu The cross context virtual CPU structure.
3074 */
3075DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3076{
3077 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3078 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3079 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3080 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3081 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3082}
3083
3084
3085/**
3086 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3087 *
3088 * @param pVCpu The cross context virtual CPU structure.
3089 */
3090DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3091{
3092 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3093 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3094 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3095 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3096 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3097}
3098
3099
3100#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3101/**
3102 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3103 *
3104 * @param pVCpu The cross context virtual CPU structure.
3105 * @param u32ErrCode The error code for the general-protection exception.
3106 */
3107DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3108{
3109 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3110 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3111 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3112 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3113 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3114}
3115
3116
3117/**
3118 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3119 *
3120 * @param pVCpu The cross context virtual CPU structure.
3121 * @param u32ErrCode The error code for the stack exception.
3122 */
3123DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3124{
3125 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3126 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3127 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3128 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3129 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3130}
3131#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3132
3133
3134/**
3135 * Fixes up attributes for the specified segment register.
3136 *
3137 * @param pVCpu The cross context virtual CPU structure.
3138 * @param pSelReg The segment register that needs fixing.
3139 * @param pszRegName The register name (for logging and assertions).
3140 */
3141static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3142{
3143 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3144
3145 /*
3146 * If VT-x marks the segment as unusable, most other bits remain undefined:
3147 * - For CS the L, D and G bits have meaning.
3148 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3149 * - For the remaining data segments no bits are defined.
3150 *
3151     * The present bit and the unusable bit have been observed to be set at the
3152 * same time (the selector was supposed to be invalid as we started executing
3153 * a V8086 interrupt in ring-0).
3154 *
3155     * What should be important for the rest of the VBox code is that the P bit is
3156     * cleared. Some of the other VBox code recognizes the unusable bit, but
3157     * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3158 * safe side here, we'll strip off P and other bits we don't care about. If
3159 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3160 *
3161 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3162 */
3163#ifdef VBOX_STRICT
3164 uint32_t const uAttr = pSelReg->Attr.u;
3165#endif
3166
3167 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3168 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3169 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
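    /* Worked example (assuming the usual VMX access-rights layout, P in bit 7, unusable flag
       in bit 16): an unusable data segment that came back as 0x1c093 (unusable | G | D/B | P |
       S | type 3) is reduced to 0x1c013 by the mask above, i.e. P and the irrelevant
       limit/AVL bits are stripped while the defined bits survive. */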
3170
3171#ifdef VBOX_STRICT
3172# ifndef IN_NEM_DARWIN
3173 VMMRZCallRing3Disable(pVCpu);
3174# endif
3175 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3176# ifdef DEBUG_bird
3177 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3178 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3179 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3180# endif
3181# ifndef IN_NEM_DARWIN
3182 VMMRZCallRing3Enable(pVCpu);
3183# endif
3184 NOREF(uAttr);
3185#endif
3186 RT_NOREF2(pVCpu, pszRegName);
3187}
3188
3189
3190/**
3191 * Imports a guest segment register from the current VMCS into the guest-CPU
3192 * context.
3193 *
3194 * @param pVCpu The cross context virtual CPU structure.
3195 * @tparam a_iSegReg The segment register number (X86_SREG_XXX).
3196 *
3197 * @remarks Called with interrupts and/or preemption disabled.
3198 */
3199template<uint32_t const a_iSegReg>
3200DECLINLINE(void) vmxHCImportGuestSegReg(PVMCPUCC pVCpu)
3201{
3202 AssertCompile(a_iSegReg < X86_SREG_COUNT);
3203    /* Check that the macros we depend upon here and in the export parent function work: */
3204#define MY_SEG_VMCS_FIELD(a_FieldPrefix, a_FieldSuff) \
3205 ( a_iSegReg == X86_SREG_ES ? a_FieldPrefix ## ES ## a_FieldSuff \
3206 : a_iSegReg == X86_SREG_CS ? a_FieldPrefix ## CS ## a_FieldSuff \
3207 : a_iSegReg == X86_SREG_SS ? a_FieldPrefix ## SS ## a_FieldSuff \
3208 : a_iSegReg == X86_SREG_DS ? a_FieldPrefix ## DS ## a_FieldSuff \
3209 : a_iSegReg == X86_SREG_FS ? a_FieldPrefix ## FS ## a_FieldSuff \
3210 : a_iSegReg == X86_SREG_GS ? a_FieldPrefix ## GS ## a_FieldSuff : 0)
3211 AssertCompile(VMX_VMCS_GUEST_SEG_BASE(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS_GUEST_,_BASE));
3212 AssertCompile(VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS16_GUEST_,_SEL));
3213 AssertCompile(VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_LIMIT));
3214 AssertCompile(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_ACCESS_RIGHTS));
3215
3216 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[a_iSegReg];
3217
3218 uint16_t u16Sel;
3219 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg), &u16Sel); AssertRC(rc);
3220 pSelReg->Sel = u16Sel;
3221 pSelReg->ValidSel = u16Sel;
3222
3223 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3224 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(a_iSegReg), &pSelReg->u64Base); AssertRC(rc);
3225
3226 uint32_t u32Attr;
3227 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg), &u32Attr); AssertRC(rc);
3228 pSelReg->Attr.u = u32Attr;
3229 if (u32Attr & X86DESCATTR_UNUSABLE)
3230 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + a_iSegReg * 3);
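    /* Note: each register name in the string literal above occupies 3 bytes including its
       embedded NUL, so adding a_iSegReg * 3 indexes straight to the right name (the
       X86_SREG_XXX values follow the ES, CS, SS, DS, FS, GS order). */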
3231
3232 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3233}
3234
3235
3236/**
3237 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3238 *
3239 * @param pVCpu The cross context virtual CPU structure.
3240 *
3241 * @remarks Called with interrupts and/or preemption disabled.
3242 */
3243DECLINLINE(void) vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3244{
3245 uint16_t u16Sel;
3246 uint64_t u64Base;
3247 uint32_t u32Limit, u32Attr;
3248 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3249 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3250 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3251 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3252
3253 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3254 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3255 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3256 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3257 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3258 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3259 if (u32Attr & X86DESCATTR_UNUSABLE)
3260 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3261}
3262
3263
3264/**
3265 * Imports the guest TR from the current VMCS into the guest-CPU context.
3266 *
3267 * @param pVCpu The cross context virtual CPU structure.
3268 *
3269 * @remarks Called with interrupts and/or preemption disabled.
3270 */
3271DECLINLINE(void) vmxHCImportGuestTr(PVMCPUCC pVCpu)
3272{
3273 uint16_t u16Sel;
3274 uint64_t u64Base;
3275 uint32_t u32Limit, u32Attr;
3276 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3277 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3278 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3279 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3280
3281 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3282 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3283 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3284 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3285 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3286 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3287 /* TR is the only selector that can never be unusable. */
3288 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3289}
3290
3291
3292/**
3293 * Core: Imports the guest RIP from the VMCS back into the guest-CPU context.
3294 *
3295 * @returns The RIP value.
3296 * @param pVCpu The cross context virtual CPU structure.
3297 *
3298 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3299 * @remarks Do -not- call this function directly!
3300 */
3301DECL_FORCE_INLINE(uint64_t) vmxHCImportGuestCoreRip(PVMCPUCC pVCpu)
3302{
3303 uint64_t u64Val;
3304 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3305 AssertRC(rc);
3306
3307 pVCpu->cpum.GstCtx.rip = u64Val;
3308
3309 return u64Val;
3310}
3311
3312
3313/**
3314 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3315 *
3316 * @param pVCpu The cross context virtual CPU structure.
3317 *
3318 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3319 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3320 * instead!!!
3321 */
3322DECLINLINE(void) vmxHCImportGuestRip(PVMCPUCC pVCpu)
3323{
3324 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
3325 {
3326 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3327 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3328 }
3329}
3330
3331
3332/**
3333 * Core: Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3334 *
3335 * @param pVCpu The cross context virtual CPU structure.
3336 * @param pVmcsInfo The VMCS info. object.
3337 *
3338 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3339 * @remarks Do -not- call this function directly!
3340 */
3341DECL_FORCE_INLINE(void) vmxHCImportGuestCoreRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3342{
3343 uint64_t u64Val;
3344 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
3345 AssertRC(rc);
3346
3347 pVCpu->cpum.GstCtx.rflags.u64 = u64Val;
3348#ifndef IN_NEM_DARWIN
3349 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3350 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
3351 {
3352 pVCpu->cpum.GstCtx.eflags.Bits.u1VM = 0;
3353 pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3354 }
3355#else
3356 RT_NOREF(pVmcsInfo);
3357#endif
3358}
3359
3360
3361/**
3362 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3363 *
3364 * @param pVCpu The cross context virtual CPU structure.
3365 * @param pVmcsInfo The VMCS info. object.
3366 *
3367 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3368 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3369 * instead!!!
3370 */
3371DECLINLINE(void) vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3372{
3373 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RFLAGS)
3374 {
3375 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3376 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3377 }
3378}
3379
3380
3381/**
3382 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3383 * context.
3384 *
3385 * @note May import RIP and RFLAGS if interrupt or NMI are blocked.
3386 *
3387 * @param pVCpu The cross context virtual CPU structure.
3388 * @param pVmcsInfo The VMCS info. object.
3389 *
3390 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3391 * do not log!
3392 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3393 * instead!!!
3394 */
3395DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3396{
3397 uint32_t u32Val;
3398 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3399 if (!u32Val)
3400 {
3401 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3402 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3403/** @todo r=bird: This is a call which isn't necessary most of the time, yet this
3404 * path is taken on basically all exits. Try to find a way to eliminate it. */
3405 CPUMSetGuestNmiBlocking(pVCpu, false);
3406 }
3407 else
3408 {
3409/** @todo consider this branch for non-inlining. */
3410 /*
3411 * We must import RIP here to set our EM interrupt-inhibited state.
3412 * We also import RFLAGS as our code that evaluates pending interrupts
3413 * before VM-entry requires it.
3414 */
3415 vmxHCImportGuestRip(pVCpu);
3416 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3417
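        /* For reference: the guest interruptibility-state field uses bit 0 for blocking by STI,
           bit 1 for blocking by MOV SS, bit 2 for blocking by SMI and bit 3 for blocking by NMI,
           which is what the two checks below decode. */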
3418 if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3419 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
3420 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3421 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3422
3423 bool const fNmiBlocking = RT_BOOL(u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
3424 CPUMSetGuestNmiBlocking(pVCpu, fNmiBlocking);
3425 }
3426}
3427
3428
3429/**
3430 * Worker for VMXR0ImportStateOnDemand.
3431 *
3432 * @returns VBox status code.
3433 * @param pVCpu The cross context virtual CPU structure.
3434 * @param pVmcsInfo The VMCS info. object.
3435 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3436 */
3437static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3438{
3439 int rc = VINF_SUCCESS;
3440 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3441 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3442 uint32_t u32Val;
3443
3444 /*
3445     * Note! This is a hack to work around a mysterious BSOD observed with release builds
3446 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3447 * neither are other host platforms.
3448 *
3449 * Committing this temporarily as it prevents BSOD.
3450 *
3451 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3452 */
3453#ifdef RT_OS_WINDOWS
3454 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3455 return VERR_HM_IPE_1;
3456#endif
3457
3458 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3459
3460#ifndef IN_NEM_DARWIN
3461 /*
3462 * We disable interrupts to make the updating of the state and in particular
3463     * the fExtrn modification atomic with respect to preemption hooks.
3464 */
3465 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3466#endif
3467
3468 fWhat &= pCtx->fExtrn;
3469 if (fWhat)
3470 {
3471 do
3472 {
3473 if (fWhat & CPUMCTX_EXTRN_RIP)
3474 vmxHCImportGuestRip(pVCpu);
3475
3476 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3477 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3478
3479 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3480 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3481 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3482
3483 if (fWhat & CPUMCTX_EXTRN_RSP)
3484 {
3485 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3486 AssertRC(rc);
3487 }
3488
3489 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3490 {
3491 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3492#ifndef IN_NEM_DARWIN
3493 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3494#else
3495 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3496#endif
3497 if (fWhat & CPUMCTX_EXTRN_CS)
3498 {
3499 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3500 vmxHCImportGuestRip(pVCpu); /** @todo WTF? */
3501 if (fRealOnV86Active)
3502 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3503 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3504 }
3505 if (fWhat & CPUMCTX_EXTRN_SS)
3506 {
3507 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3508 if (fRealOnV86Active)
3509 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3510 }
3511 if (fWhat & CPUMCTX_EXTRN_DS)
3512 {
3513 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3514 if (fRealOnV86Active)
3515 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3516 }
3517 if (fWhat & CPUMCTX_EXTRN_ES)
3518 {
3519 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3520 if (fRealOnV86Active)
3521 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3522 }
3523 if (fWhat & CPUMCTX_EXTRN_FS)
3524 {
3525 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3526 if (fRealOnV86Active)
3527 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3528 }
3529 if (fWhat & CPUMCTX_EXTRN_GS)
3530 {
3531 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3532 if (fRealOnV86Active)
3533 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3534 }
3535 }
3536
3537 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3538 {
3539 if (fWhat & CPUMCTX_EXTRN_LDTR)
3540 vmxHCImportGuestLdtr(pVCpu);
3541
3542 if (fWhat & CPUMCTX_EXTRN_GDTR)
3543 {
3544 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3545 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3546 pCtx->gdtr.cbGdt = u32Val;
3547 }
3548
3549 /* Guest IDTR. */
3550 if (fWhat & CPUMCTX_EXTRN_IDTR)
3551 {
3552 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3553 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3554 pCtx->idtr.cbIdt = u32Val;
3555 }
3556
3557 /* Guest TR. */
3558 if (fWhat & CPUMCTX_EXTRN_TR)
3559 {
3560#ifndef IN_NEM_DARWIN
3561 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3562                       so we don't need to import that one. */
3563 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3564#endif
3565 vmxHCImportGuestTr(pVCpu);
3566 }
3567 }
3568
3569 if (fWhat & CPUMCTX_EXTRN_DR7)
3570 {
3571#ifndef IN_NEM_DARWIN
3572 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3573#endif
3574 {
3575 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3576 AssertRC(rc);
3577 }
3578 }
3579
3580 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3581 {
3582 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3583 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3584 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3585 pCtx->SysEnter.cs = u32Val;
3586 }
3587
3588#ifndef IN_NEM_DARWIN
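            /* The MSRs below are switched lazily: when VMX_LAZY_MSRS_LOADED_GUEST is set the
               guest values are currently loaded in the CPU, so a plain RDMSR returns the guest
               value and no VMCS access is needed. */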
3589 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3590 {
3591 if ( pVM->hmr0.s.fAllow64BitGuests
3592 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3593 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3594 }
3595
3596 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3597 {
3598 if ( pVM->hmr0.s.fAllow64BitGuests
3599 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3600 {
3601 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3602 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3603 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3604 }
3605 }
3606
3607 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3608 {
3609 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3610 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3611 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3612 Assert(pMsrs);
3613 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3614 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
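            /* Each VMXAUTOMSR entry in the VM-exit MSR-store area is 16 bytes: the MSR index, a
               reserved dword and the 64-bit value the CPU stored on VM-exit.  The loop below
               routes each stored value to the right place in the guest context. */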
3615 for (uint32_t i = 0; i < cMsrs; i++)
3616 {
3617 uint32_t const idMsr = pMsrs[i].u32Msr;
3618 switch (idMsr)
3619 {
3620 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3621 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3622 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3623 default:
3624 {
3625 uint32_t idxLbrMsr;
3626 if (VM_IS_VMX_LBR(pVM))
3627 {
3628 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3629 {
3630 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3631 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3632 break;
3633 }
3634 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3635 {
3636                                Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3637 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3638 break;
3639 }
3640 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3641 {
3642 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3643 break;
3644 }
3645 /* Fallthru (no break) */
3646 }
3647 pCtx->fExtrn = 0;
3648 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3649 ASMSetFlags(fEFlags);
3650 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3651 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3652 }
3653 }
3654 }
3655 }
3656#endif
3657
3658 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3659 {
3660 if (fWhat & CPUMCTX_EXTRN_CR0)
3661 {
3662 uint64_t u64Cr0;
3663 uint64_t u64Shadow;
3664 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3665 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3666#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3667 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3668 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3669#else
3670 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3671 {
3672 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3673 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3674 }
3675 else
3676 {
3677 /*
3678 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3679 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3680 * re-construct CR0. See @bugref{9180#c95} for details.
3681 */
3682 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3683 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3684 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3685 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3686 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
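                    /* In words: bits intercepted by neither party come straight from the hardware
                       VMCS, bits in the nested hypervisor's mask are taken from its VMCS guest CR0,
                       and bits only we intercept come from the CR0 read shadow. */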
3687 }
3688#endif
3689#ifndef IN_NEM_DARWIN
3690 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3691#endif
3692 CPUMSetGuestCR0(pVCpu, u64Cr0);
3693#ifndef IN_NEM_DARWIN
3694 VMMRZCallRing3Enable(pVCpu);
3695#endif
3696 }
3697
3698 if (fWhat & CPUMCTX_EXTRN_CR4)
3699 {
3700 uint64_t u64Cr4;
3701 uint64_t u64Shadow;
3702 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3703 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3704#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3705 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3706 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3707#else
3708 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3709 {
3710 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3711 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3712 }
3713 else
3714 {
3715 /*
3716 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3717 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3718 * re-construct CR4. See @bugref{9180#c95} for details.
3719 */
3720 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3721 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3722 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3723 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3724 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3725 }
3726#endif
3727 pCtx->cr4 = u64Cr4;
3728 }
3729
3730 if (fWhat & CPUMCTX_EXTRN_CR3)
3731 {
3732 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3733 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3734 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3735 && CPUMIsGuestPagingEnabledEx(pCtx)))
3736 {
3737 uint64_t u64Cr3;
3738 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3739 if (pCtx->cr3 != u64Cr3)
3740 {
3741 pCtx->cr3 = u64Cr3;
3742 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3743 }
3744
3745 /*
3746 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3747 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3748 */
3749 if (CPUMIsGuestInPAEModeEx(pCtx))
3750 {
3751 X86PDPE aPaePdpes[4];
3752 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3753 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3754 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3755 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3756 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3757 {
3758 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3759 /* PGM now updates PAE PDPTEs while updating CR3. */
3760 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3761 }
3762 }
3763 }
3764 }
3765 }
3766
3767#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3768 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3769 {
3770 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3771 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3772 {
3773 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3774 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3775 if (RT_SUCCESS(rc))
3776 { /* likely */ }
3777 else
3778 break;
3779 }
3780 }
3781#endif
3782 } while (0);
3783
3784 if (RT_SUCCESS(rc))
3785 {
3786 /* Update fExtrn. */
3787 pCtx->fExtrn &= ~fWhat;
3788
3789 /* If everything has been imported, clear the HM keeper bit. */
3790 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3791 {
3792#ifndef IN_NEM_DARWIN
3793 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3794#else
3795 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3796#endif
3797 Assert(!pCtx->fExtrn);
3798 }
3799 }
3800 }
3801#ifndef IN_NEM_DARWIN
3802 else
3803 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3804
3805 /*
3806 * Restore interrupts.
3807 */
3808 ASMSetFlags(fEFlags);
3809#endif
3810
3811 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3812
3813 if (RT_SUCCESS(rc))
3814 { /* likely */ }
3815 else
3816 return rc;
3817
3818 /*
3819 * Honor any pending CR3 updates.
3820 *
3821 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3822 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3823 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3824 *
3825 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3826 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3827 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3828 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3829 *
3830 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3831 *
3832 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3833 */
3834 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3835#ifndef IN_NEM_DARWIN
3836 && VMMRZCallRing3IsEnabled(pVCpu)
3837#endif
3838 )
3839 {
3840 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3841 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3842 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3843 }
3844
3845 return VINF_SUCCESS;
3846}
3847
3848
3849/**
3850 * Internal state fetcher, inner version where we fetch all of a_fWhat.
3851 *
3852 * @returns VBox status code.
3853 * @param pVCpu The cross context virtual CPU structure.
3854 * @param pVmcsInfo The VMCS info. object.
3855 * @param fEFlags Saved EFLAGS for restoring the interrupt flag. Ignored
3856 * in NEM/darwin context.
3857 * @tparam a_fWhat What to import, zero or more bits from
3858 * HMVMX_CPUMCTX_EXTRN_ALL.
3859 */
3860template<uint64_t const a_fWhat>
3861static int vmxHCImportGuestStateInner(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3862{
3863 Assert(a_fWhat != 0); /* No AssertCompile as the assertion probably kicks in before the compiler (clang) discards it. */
3864 AssertCompile(!(a_fWhat & ~HMVMX_CPUMCTX_EXTRN_ALL));
3865 Assert( (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == a_fWhat
3866 || (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == (a_fWhat & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
3867
3868 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3869
3870 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3871
3872 /* RIP and RFLAGS may have been imported already by the post exit code
3873 together with the CPUMCTX_EXTRN_INHIBIT_INT/NMI state, so this part
3874       of the code is skipped when both registers have already been fetched. */
3875 if ( (a_fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3876 && pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3877 {
3878 if (a_fWhat & CPUMCTX_EXTRN_RFLAGS)
3879 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3880
3881 if (a_fWhat & CPUMCTX_EXTRN_RIP)
3882 {
3883 if (!(a_fWhat & CPUMCTX_EXTRN_CS))
3884 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3885 else
3886 vmxHCImportGuestCoreRip(pVCpu);
3887 }
3888 }
3889
3890 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3891 if (a_fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3892 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3893
3894 if (a_fWhat & (CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TR))
3895 {
3896 if (a_fWhat & CPUMCTX_EXTRN_CS)
3897 {
3898 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3899            /** @todo try to get rid of this carp, it smells and is probably never ever
3900 * used: */
3901 if ( !(a_fWhat & CPUMCTX_EXTRN_RIP)
3902 && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP))
3903 {
3904 vmxHCImportGuestCoreRip(pVCpu);
3905 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3906 }
3907 EMHistoryUpdatePC(pVCpu, pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, true /* fFlattened */);
3908 }
3909 if (a_fWhat & CPUMCTX_EXTRN_SS)
3910 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3911 if (a_fWhat & CPUMCTX_EXTRN_DS)
3912 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3913 if (a_fWhat & CPUMCTX_EXTRN_ES)
3914 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3915 if (a_fWhat & CPUMCTX_EXTRN_FS)
3916 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3917 if (a_fWhat & CPUMCTX_EXTRN_GS)
3918 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3919
3920 /* Guest TR.
3921 Real-mode emulation using virtual-8086 mode has the fake TSS
3922          (pRealModeTSS) in TR, so we don't need to import that one. */
3923#ifndef IN_NEM_DARWIN
3924 PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmcsInfo->pShared;
3925 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3926 if ((a_fWhat & CPUMCTX_EXTRN_TR) && !fRealOnV86Active)
3927#else
3928 if (a_fWhat & CPUMCTX_EXTRN_TR)
3929#endif
3930 vmxHCImportGuestTr(pVCpu);
3931
3932#ifndef IN_NEM_DARWIN /* NEM/Darwin: HV supports only unrestricted guest execution. */
3933 if (fRealOnV86Active)
3934 {
3935 if (a_fWhat & CPUMCTX_EXTRN_CS)
3936 pVCpu->cpum.GstCtx.cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3937 if (a_fWhat & CPUMCTX_EXTRN_SS)
3938 pVCpu->cpum.GstCtx.ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3939 if (a_fWhat & CPUMCTX_EXTRN_DS)
3940 pVCpu->cpum.GstCtx.ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3941 if (a_fWhat & CPUMCTX_EXTRN_ES)
3942 pVCpu->cpum.GstCtx.es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3943 if (a_fWhat & CPUMCTX_EXTRN_FS)
3944 pVCpu->cpum.GstCtx.fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3945 if (a_fWhat & CPUMCTX_EXTRN_GS)
3946 pVCpu->cpum.GstCtx.gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3947 }
3948#endif
3949 }
3950
3951 if (a_fWhat & CPUMCTX_EXTRN_RSP)
3952 {
3953 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pVCpu->cpum.GstCtx.rsp);
3954 AssertRC(rc);
3955 }
3956
3957 if (a_fWhat & CPUMCTX_EXTRN_LDTR)
3958 vmxHCImportGuestLdtr(pVCpu);
3959
3960 if (a_fWhat & CPUMCTX_EXTRN_GDTR)
3961 {
3962 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pVCpu->cpum.GstCtx.gdtr.pGdt); AssertRC(rc1);
3963 uint32_t u32Val;
3964 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc2);
3965 pVCpu->cpum.GstCtx.gdtr.cbGdt = (uint16_t)u32Val;
3966 }
3967
3968 /* Guest IDTR. */
3969 if (a_fWhat & CPUMCTX_EXTRN_IDTR)
3970 {
3971 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pVCpu->cpum.GstCtx.idtr.pIdt); AssertRC(rc1);
3972 uint32_t u32Val;
3973 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc2);
3974 pVCpu->cpum.GstCtx.idtr.cbIdt = (uint64_t)u32Val;
3975 }
3976
3977 if (a_fWhat & CPUMCTX_EXTRN_DR7)
3978 {
3979#ifndef IN_NEM_DARWIN
3980 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3981#endif
3982 {
3983 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pVCpu->cpum.GstCtx.dr[7]);
3984 AssertRC(rc);
3985 }
3986 }
3987
3988 if (a_fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3989 {
3990 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pVCpu->cpum.GstCtx.SysEnter.eip); AssertRC(rc1);
3991 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pVCpu->cpum.GstCtx.SysEnter.esp); AssertRC(rc2);
3992 uint32_t u32Val;
3993 int const rc3 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc3);
3994 pVCpu->cpum.GstCtx.SysEnter.cs = u32Val;
3995 }
3996
3997#ifndef IN_NEM_DARWIN
3998 if (a_fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3999 {
4000 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4001 && pVM->hmr0.s.fAllow64BitGuests)
4002 pVCpu->cpum.GstCtx.msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
4003 }
4004
4005 if (a_fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
4006 {
4007 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4008 && pVM->hmr0.s.fAllow64BitGuests)
4009 {
4010 pVCpu->cpum.GstCtx.msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
4011 pVCpu->cpum.GstCtx.msrSTAR = ASMRdMsr(MSR_K6_STAR);
4012 pVCpu->cpum.GstCtx.msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
4013 }
4014 }
4015
4016 if (a_fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
4017 {
4018 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
4019 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
4020 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
4021 Assert(pMsrs);
4022 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
4023 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
4024 for (uint32_t i = 0; i < cMsrs; i++)
4025 {
4026 uint32_t const idMsr = pMsrs[i].u32Msr;
4027 switch (idMsr)
4028 {
4029 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
4030 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
4031 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
4032 default:
4033 {
4034 uint32_t idxLbrMsr;
4035 if (VM_IS_VMX_LBR(pVM))
4036 {
4037 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
4038 {
4039 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
4040 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
4041 break;
4042 }
4043 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
4044 {
4045                            Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
4046 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
4047 break;
4048 }
4049 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
4050 {
4051 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
4052 break;
4053 }
4054 }
4055 pVCpu->cpum.GstCtx.fExtrn = 0;
4056 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
4057 ASMSetFlags(fEFlags);
4058 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
4059 return VERR_HM_UNEXPECTED_LD_ST_MSR;
4060 }
4061 }
4062 }
4063 }
4064#endif
4065
4066 if (a_fWhat & CPUMCTX_EXTRN_CR0)
4067 {
4068 uint64_t u64Cr0;
4069 uint64_t u64Shadow;
4070 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc1);
4071 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc2);
4072#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
4073 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4074 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
4075#else
4076 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4077 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4078 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
4079 else
4080 {
4081 /*
4082 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
4083 * the nested-guest using hardware-assisted VMX. Accordingly we need to
4084 * re-construct CR0. See @bugref{9180#c95} for details.
4085 */
4086 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
4087 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4088 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4089 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
4090 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
4091 }
4092#endif
4093#ifndef IN_NEM_DARWIN
4094 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
4095#endif
4096 CPUMSetGuestCR0(pVCpu, u64Cr0);
4097#ifndef IN_NEM_DARWIN
4098 VMMRZCallRing3Enable(pVCpu);
4099#endif
4100 }
4101
4102 if (a_fWhat & CPUMCTX_EXTRN_CR4)
4103 {
4104 uint64_t u64Cr4;
4105 uint64_t u64Shadow;
4106 int rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc1);
4107 int rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc2);
4108#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
4109 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4110 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
4111#else
4112 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4113 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4114 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
4115 else
4116 {
4117 /*
4118 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
4119 * the nested-guest using hardware-assisted VMX. Accordingly we need to
4120 * re-construct CR4. See @bugref{9180#c95} for details.
4121 */
4122 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
4123 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4124 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4125 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
4126 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
4127 }
4128#endif
4129 pVCpu->cpum.GstCtx.cr4 = u64Cr4;
4130 }
4131
4132 if (a_fWhat & CPUMCTX_EXTRN_CR3)
4133 {
4134 /* CR0.PG bit changes are always intercepted, so it's up to date. */
4135 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
4136 || ( VM_IS_VMX_NESTED_PAGING(pVM)
4137 && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)))
4138 {
4139 uint64_t u64Cr3;
4140 int const rc0 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc0);
4141 if (pVCpu->cpum.GstCtx.cr3 != u64Cr3)
4142 {
4143 pVCpu->cpum.GstCtx.cr3 = u64Cr3;
4144 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
4145 }
4146
4147 /*
4148 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
4149 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
4150 */
4151 if (CPUMIsGuestInPAEModeEx(&pVCpu->cpum.GstCtx))
4152 {
4153 X86PDPE aPaePdpes[4];
4154 int const rc1 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc1);
4155 int const rc2 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc2);
4156 int const rc3 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc3);
4157 int const rc4 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc4);
4158 if (memcmp(&aPaePdpes[0], &pVCpu->cpum.GstCtx.aPaePdpes[0], sizeof(aPaePdpes)))
4159 {
4160 memcpy(&pVCpu->cpum.GstCtx.aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
4161 /* PGM now updates PAE PDPTEs while updating CR3. */
4162 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
4163 }
4164 }
4165 }
4166 }
4167
4168#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4169 if (a_fWhat & CPUMCTX_EXTRN_HWVIRT)
4170 {
4171 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4172 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4173 {
4174 Assert(CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx));
4175 int const rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
4176 AssertRCReturn(rc, rc);
4177 }
4178 }
4179#endif
4180
4181 /* Update fExtrn. */
4182 pVCpu->cpum.GstCtx.fExtrn &= ~a_fWhat;
4183
4184 /* If everything has been imported, clear the HM keeper bit. */
4185 if (!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
4186 {
4187#ifndef IN_NEM_DARWIN
4188 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
4189#else
4190 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
4191#endif
4192 Assert(!pVCpu->cpum.GstCtx.fExtrn);
4193 }
4194
4195 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
4196
4197 /*
4198 * Honor any pending CR3 updates.
4199 *
4200 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
4201 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
4202 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
4203 *
4204 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
4205 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
4206 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
4207 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
4208 *
4209 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
4210 *
4211 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
4212 */
4213#ifndef IN_NEM_DARWIN
4214 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4215 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu))
4216 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu) )
4217 return VINF_SUCCESS;
4218 ASMSetFlags(fEFlags);
4219#else
4220 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4221 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
4222 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) )
4223 return VINF_SUCCESS;
4224 RT_NOREF_PV(fEFlags);
4225#endif
4226
4227 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
4228 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
4229 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
4230 return VINF_SUCCESS;
4231}
4232
4233
4234/**
4235 * Internal state fetcher.
4236 *
4237 * @returns VBox status code.
4238 * @param pVCpu The cross context virtual CPU structure.
4239 * @param pVmcsInfo The VMCS info. object.
4240 * @param pszCaller For logging.
4241 * @tparam a_fWhat What needs to be imported, CPUMCTX_EXTRN_XXX.
4242 * @tparam a_fDoneLocal What's ASSUMED to have been retrieved locally
4243 * already. This is ORed together with @a a_fWhat when
4244 * calculating what needs fetching (just for safety).
4245 * @tparam a_fDonePostExit  What's ASSUMED to have been retrieved by
4246 * hmR0VmxPostRunGuest()/nemR3DarwinHandleExitCommon()
4247 * already. This is ORed together with @a a_fWhat when
4248 * calculating what needs fetching (just for safety).
4249 */
4250template<uint64_t const a_fWhat,
4251 uint64_t const a_fDoneLocal = 0,
4252 uint64_t const a_fDonePostExit = 0
4253#ifndef IN_NEM_DARWIN
4254 | CPUMCTX_EXTRN_INHIBIT_INT
4255 | CPUMCTX_EXTRN_INHIBIT_NMI
4256# if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
4257 | HMVMX_CPUMCTX_EXTRN_ALL
4258# elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
4259 | CPUMCTX_EXTRN_RFLAGS
4260# endif
4261#else /* IN_NEM_DARWIN */
4262 | CPUMCTX_EXTRN_ALL /** @todo optimize */
4263#endif /* IN_NEM_DARWIN */
4264>
4265DECLINLINE(int) vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, const char *pszCaller)
4266{
4267 RT_NOREF_PV(pszCaller);
4268 if ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL)
4269 {
4270#ifndef IN_NEM_DARWIN
4271 /*
4272 * We disable interrupts to make the updating of the state and in particular
4273         * the fExtrn modification atomic with respect to preemption hooks.
4274 */
4275 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
4276#else
4277 RTCCUINTREG const fEFlags = 0;
4278#endif
4279
4280 /*
4281 * We combine all three parameters and take the (probably) inlined optimized
4282 * code path for the new things specified in a_fWhat.
4283 *
4284 * As a tweak to deal with exits that have INHIBIT_INT/NMI active, causing
4285 * vmxHCImportGuestIntrState to automatically fetch both RIP & RFLAGS, we
4286 * also take the streamlined path when both of these are cleared in fExtrn
4287 * already. vmxHCImportGuestStateInner checks fExtrn before fetching. This
4288 * helps with MWAIT and HLT exits that always inhibit IRQs on many platforms.
4289 */
4290 uint64_t const fWhatToDo = pVCpu->cpum.GstCtx.fExtrn
4291 & ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL);
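        /* Illustration: suppose a_fWhat is RIP | RSP and the post-exit code already pulled in
           RIP (together with the inhibit bits); those bits are then clear in fExtrn, so
           fWhatToDo is just RSP.  That matches the second alternative below (the requested set
           minus RIP/RFLAGS) and we still take the inlined path. */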
4292 if (RT_LIKELY( ( fWhatToDo == (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit))
4293 || fWhatToDo == ( a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)
4294 & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)) /* fetch with INHIBIT_INT/NMI */))
4295 && (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)) != 0 /* just in case */)
4296 {
4297 int const rc = vmxHCImportGuestStateInner< a_fWhat
4298 & HMVMX_CPUMCTX_EXTRN_ALL
4299 & ~(a_fDoneLocal | a_fDonePostExit)>(pVCpu, pVmcsInfo, fEFlags);
4300#ifndef IN_NEM_DARWIN
4301 ASMSetFlags(fEFlags);
4302#endif
4303 return rc;
4304 }
4305
4306#ifndef IN_NEM_DARWIN
4307 ASMSetFlags(fEFlags);
4308#endif
4309
4310 /*
4311 * We shouldn't normally get here, but it may happen when executing
4312 * in the debug run-loops. Typically, everything should already have
4313 * been fetched then. Otherwise call the fallback state import function.
4314 */
4315 if (fWhatToDo == 0)
4316 { /* hope the cause was the debug loop or something similar */ }
4317 else
4318 {
4319 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestStateFallback);
4320 Log11Func(("a_fWhat=%#RX64/%#RX64/%#RX64 fExtrn=%#RX64 => %#RX64 - Taking inefficient code path from %s!\n",
4321 a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL, a_fDoneLocal & HMVMX_CPUMCTX_EXTRN_ALL,
4322 a_fDonePostExit & HMVMX_CPUMCTX_EXTRN_ALL, pVCpu->cpum.GstCtx.fExtrn, fWhatToDo, pszCaller));
4323 return vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, a_fWhat | a_fDoneLocal | a_fDonePostExit);
4324 }
4325 }
4326 return VINF_SUCCESS;
4327}
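/* Typical usage from an exit handler (illustrative; the exact extrn mask depends on the caller):
 *     int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS>(pVCpu, pVmcsInfo, __FUNCTION__);
 *     AssertRCReturn(rc, rc);
 */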
4328
4329
4330/**
4331 * Check per-VM and per-VCPU force flag actions that require us to go back to
4332 * ring-3 for one reason or another.
4333 *
4334 * @returns Strict VBox status code (i.e. informational status codes too)
4335 * @retval VINF_SUCCESS if we don't have any actions that require going back to
4336 * ring-3.
4337 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
4338 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
4339 * interrupts)
4340 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
4341 * all EMTs to be in ring-3.
4342 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
4343 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
4344 * to the EM loop.
4345 *
4346 * @param pVCpu The cross context virtual CPU structure.
4347 * @param pVCpu          The cross context virtual CPU structure.
4348 * @param fIsNestedGuest Flag whether this is for a pending nested guest event.
4348 * @param fStepping Whether we are single-stepping the guest using the
4349 * hypervisor debugger.
4350 *
4351 * @remarks This might cause nested-guest VM-exits, so the caller must check if the guest
4352 * is no longer in VMX non-root mode.
4353 */
4354static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
4355{
4356#ifndef IN_NEM_DARWIN
4357 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4358#endif
4359
4360 /*
4361 * Update pending interrupts into the APIC's IRR.
4362 */
4363 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4364 APICUpdatePendingInterrupts(pVCpu);
4365
4366 /*
4367 * Anything pending? Should be more likely than not if we're doing a good job.
4368 */
4369 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4370 if ( !fStepping
4371 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
4372 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
4373 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
4374 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4375 return VINF_SUCCESS;
4376
4377    /* Pending PGM CR3 sync. */
4378    if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
4379 {
4380 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4381 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
4382 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
4383 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
4384 if (rcStrict != VINF_SUCCESS)
4385 {
4386 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
4387 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
4388 return rcStrict;
4389 }
4390 }
4391
4392 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
4393 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
4394 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4395 {
4396 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
4397 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
4398 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
4399 rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
4400 return rc;
4401 }
4402
4403 /* Pending VM request packets, such as hardware interrupts. */
4404 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
4405 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
4406 {
4407 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
4408 Log4Func(("Pending VM request forcing us back to ring-3\n"));
4409 return VINF_EM_PENDING_REQUEST;
4410 }
4411
4412 /* Pending PGM pool flushes. */
4413 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
4414 {
4415 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
4416 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
4417 return VINF_PGM_POOL_FLUSH_PENDING;
4418 }
4419
4420 /* Pending DMA requests. */
4421 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4422 {
4423 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
4424 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4425 return VINF_EM_RAW_TO_R3;
4426 }
4427
4428#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4429 /*
4430 * Pending nested-guest events.
4431 *
4432 * Please note that the priority of these events is specified and important.
4433 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4434 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
4435 */
4436 if (fIsNestedGuest)
4437 {
4438 /* Pending nested-guest APIC-write. */
4439 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4440 {
4441 Log4Func(("Pending nested-guest APIC-write\n"));
4442 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
4443 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4444 return rcStrict;
4445 }
4446
4447 /* Pending nested-guest monitor-trap flag (MTF). */
4448 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
4449 {
4450 Log4Func(("Pending nested-guest MTF\n"));
4451 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
4452 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4453 return rcStrict;
4454 }
4455
4456 /* Pending nested-guest VMX-preemption timer expired. */
4457 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
4458 {
4459 Log4Func(("Pending nested-guest preempt timer\n"));
4460 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
4461 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4462 return rcStrict;
4463 }
4464 }
4465#else
4466 NOREF(fIsNestedGuest);
4467#endif
4468
4469 return VINF_SUCCESS;
4470}
4471
4472
4473/**
4474 * Converts any TRPM trap into a pending HM event. This is typically used when
4475 * entering from ring-3 (not longjmp returns).
4476 *
4477 * @param pVCpu The cross context virtual CPU structure.
4478 */
4479static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
4480{
4481 Assert(TRPMHasTrap(pVCpu));
4482 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4483
4484 uint8_t uVector;
4485 TRPMEVENT enmTrpmEvent;
4486 uint32_t uErrCode;
4487 RTGCUINTPTR GCPtrFaultAddress;
4488 uint8_t cbInstr;
4489 bool fIcebp;
4490
4491 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4492 AssertRC(rc);
4493
4494 uint32_t u32IntInfo;
4495 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4496 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
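    /* The combined value follows the VMX interruption / IDT-vectoring information layout:
       bits 7:0 hold the vector, bits 10:8 the event type (filled in by
       HMTrpmEventTypeToVmxEventType above) and bit 31 the valid bit. */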
4497
4498 rc = TRPMResetTrap(pVCpu);
4499 AssertRC(rc);
4500 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4501 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4502
4503 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4504}
4505
4506
4507/**
4508 * Converts the pending HM event into a TRPM trap.
4509 *
4510 * @param pVCpu The cross context virtual CPU structure.
4511 */
4512static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4513{
4514 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4515
4516 /* If a trap was already pending, we did something wrong! */
4517 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4518
4519 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4520 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4521 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4522
4523 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4524
4525 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4526 AssertRC(rc);
4527
4528 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4529 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4530
4531 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4532 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4533 else
4534 {
4535 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4536 switch (uVectorType)
4537 {
4538 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4539 TRPMSetTrapDueToIcebp(pVCpu);
4540 RT_FALL_THRU();
4541 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4542 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4543 {
4544 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4545 || ( uVector == X86_XCPT_BP /* INT3 */
4546 || uVector == X86_XCPT_OF /* INTO */
4547 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4548 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4549 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4550 break;
4551 }
4552 }
4553 }
4554
4555 /* We're now done converting the pending event. */
4556 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4557}
4558
4559
4560/**
4561 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4562 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4563 *
4564 * @param pVCpu The cross context virtual CPU structure.
4565 * @param pVmcsInfo The VMCS info. object.
4566 */
4567static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4568{
4569 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4570 {
4571 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4572 {
4573 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4574 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4575 AssertRC(rc);
4576 }
4577 } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4578}
4579
4580
4581/**
4582 * Clears the interrupt-window exiting control in the VMCS.
4583 *
4584 * @param pVCpu The cross context virtual CPU structure.
4585 * @param pVmcsInfo The VMCS info. object.
4586 */
4587DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4588{
4589 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4590 {
4591 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4592 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4593 AssertRC(rc);
4594 }
4595}
4596
4597
4598/**
4599 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4600 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4601 *
4602 * @param pVCpu The cross context virtual CPU structure.
4603 * @param pVmcsInfo The VMCS info. object.
4604 */
4605static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4606{
4607 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4608 {
4609 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4610 {
4611 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4612 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4613 AssertRC(rc);
4614 Log4Func(("Setup NMI-window exiting\n"));
4615 }
4616 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4617}
4618
4619
4620/**
4621 * Clears the NMI-window exiting control in the VMCS.
4622 *
4623 * @param pVCpu The cross context virtual CPU structure.
4624 * @param pVmcsInfo The VMCS info. object.
4625 */
4626DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4627{
4628 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4629 {
4630 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4631 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4632 AssertRC(rc);
4633 }
4634}
4635
4636
4637/**
4638 * Injects an event into the guest upon VM-entry by updating the relevant fields
4639 * in the VM-entry area in the VMCS.
4640 *
4641 * @returns Strict VBox status code (i.e. informational status codes too).
4642 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4643 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4644 *
4645 * @param pVCpu The cross context virtual CPU structure.
4646 * @param pVmcsInfo The VMCS info object.
4647 * @param fIsNestedGuest Flag whether this is for a pending nested guest event.
4648 * @param pEvent The event being injected.
4649 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4650 * will be updated if necessary. This cannot be NULL.
4651 * @param fStepping Whether we're single-stepping guest execution and should
4652 * return VINF_EM_DBG_STEPPED if the event is injected
4653 * directly (registers modified by us, not by hardware on
4654 * VM-entry).
4655 */
4656static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4657 bool fStepping, uint32_t *pfIntrState)
4658{
4659 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4660 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4661 Assert(pfIntrState);
4662
4663#ifdef IN_NEM_DARWIN
4664 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4665#endif
4666
4667 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4668 uint32_t u32IntInfo = pEvent->u64IntInfo;
4669 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4670 uint32_t const cbInstr = pEvent->cbInstr;
4671 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4672 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4673 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4674
4675#ifdef VBOX_STRICT
4676 /*
4677 * Validate the error-code-valid bit for hardware exceptions.
4678 * No error codes for exceptions in real-mode.
4679 *
4680 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4681 */
4682 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4683 && !CPUMIsGuestInRealModeEx(pCtx))
4684 {
4685 switch (uVector)
4686 {
4687 case X86_XCPT_PF:
4688 case X86_XCPT_DF:
4689 case X86_XCPT_TS:
4690 case X86_XCPT_NP:
4691 case X86_XCPT_SS:
4692 case X86_XCPT_GP:
4693 case X86_XCPT_AC:
4694 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4695 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4696 RT_FALL_THRU();
4697 default:
4698 break;
4699 }
4700 }
4701
4702 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4703 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4704 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4705#endif
4706
4707 RT_NOREF(uVector);
4708 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4709 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4710 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4711 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4712 {
4713 Assert(uVector <= X86_XCPT_LAST);
4714 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4715 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4716 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4717 }
4718 else
4719 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4720
4721 /*
4722 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4723 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4724 * interrupt handler in the (real-mode) guest.
4725 *
4726 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4727 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4728 */
4729 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4730 {
4731#ifndef IN_NEM_DARWIN
4732 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4733#endif
4734 {
4735 /*
4736 * For CPUs with unrestricted guest execution enabled and with the guest
4737 * in real-mode, we must not set the deliver-error-code bit.
4738 *
4739 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4740 */
4741 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4742 }
4743#ifndef IN_NEM_DARWIN
4744 else
4745 {
4746 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4747 Assert(PDMVmmDevHeapIsEnabled(pVM));
4748 Assert(pVM->hm.s.vmx.pRealModeTSS);
4749 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4750
4751 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4752 int rc2 = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4753 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4754 AssertRCReturn(rc2, rc2);
4755
4756 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4757 size_t const cbIdtEntry = sizeof(X86IDTR16);
4758 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4759 {
4760 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4761 if (uVector == X86_XCPT_DF)
4762 return VINF_EM_RESET;
4763
4764 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4765 No error codes for exceptions in real-mode. */
4766 if (uVector == X86_XCPT_GP)
4767 {
4768 static HMEVENT const s_EventXcptDf
4769 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4770 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4771 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4772 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4773 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
4774 }
4775
4776 /*
4777 * If we're injecting an event with no valid IDT entry, inject a #GP.
4778 * No error codes for exceptions in real-mode.
4779 *
4780 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4781 */
4782 static HMEVENT const s_EventXcptGp
4783 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4784 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4785 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4786 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4787 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
4788 }
4789
4790 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4791 uint16_t uGuestIp = pCtx->ip;
4792 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4793 {
4794 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4795 /* #BP and #OF are both benign traps; we need to resume at the next instruction. */
4796 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4797 }
4798 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4799 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4800
4801 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4802 X86IDTR16 IdtEntry;
4803 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4804 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4805 AssertRCReturn(rc2, rc2);
4806
4807 /* Construct the stack frame for the interrupt/exception handler. */
4808 VBOXSTRICTRC rcStrict;
4809 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
4810 if (rcStrict == VINF_SUCCESS)
4811 {
4812 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4813 if (rcStrict == VINF_SUCCESS)
4814 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4815 }
4816
4817 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4818 if (rcStrict == VINF_SUCCESS)
4819 {
4820 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4821 pCtx->rip = IdtEntry.offSel;
4822 pCtx->cs.Sel = IdtEntry.uSel;
4823 pCtx->cs.ValidSel = IdtEntry.uSel;
4824 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
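                /* Note: cbIdtEntry is 4 (sizeof(X86IDTR16)), so the shift above is the usual
                   real-mode "selector << 4" segment-base calculation. */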
4825 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4826 && uVector == X86_XCPT_PF)
4827 pCtx->cr2 = GCPtrFault;
4828
4829 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4830 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4831 | HM_CHANGED_GUEST_RSP);
4832
4833 /*
4834 * If we delivered a hardware exception (other than an NMI) and if there was
4835 * block-by-STI in effect, we should clear it.
4836 */
4837 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4838 {
4839 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4840 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4841 Log4Func(("Clearing inhibition due to STI\n"));
4842 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4843 }
4844
4845 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4846 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4847
4848 /*
4849 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4850 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4851 */
4852 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4853
4854 /*
4855 * If we eventually support nested-guest execution without unrestricted guest execution,
4856 * we should set fInterceptEvents here.
4857 */
4858 Assert(!fIsNestedGuest);
4859
4860 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4861 if (fStepping)
4862 rcStrict = VINF_EM_DBG_STEPPED;
4863 }
4864 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4865 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4866 return rcStrict;
4867 }
4868#else
4869 RT_NOREF(pVmcsInfo);
4870#endif
4871 }
4872
4873 /*
4874 * Validate.
4875 */
4876 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4877 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4878
4879 /*
4880 * Inject the event into the VMCS.
4881 */
4882 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4883 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4884 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4885 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4886 AssertRC(rc);
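    /* Note: OR-ing the VMCS write status codes is safe since VINF_SUCCESS is 0; any
       failure is caught by the AssertRC above. */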
4887
4888 /*
4889 * Update guest CR2 if this is a page-fault.
4890 */
4891 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4892 pCtx->cr2 = GCPtrFault;
4893
4894 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4895 return VINF_SUCCESS;
4896}
4897
4898
4899/**
4900 * Evaluates the event to be delivered to the guest and sets it as the pending
4901 * event.
4902 *
4903 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4904 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4905 * NOT restore these force-flags.
4906 *
4907 * @returns Strict VBox status code (i.e. informational status codes too).
4908 * @param pVCpu The cross context virtual CPU structure.
4909 * @param pVmcsInfo The VMCS information structure.
4910 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
4911 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4912 */
4913static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4914{
4915 Assert(pfIntrState);
4916 Assert(!TRPMHasTrap(pVCpu));
4917
4918 /*
4919 * Compute/update guest-interruptibility state related FFs.
4920 * The FFs will be used below while evaluating events to be injected.
4921 */
4922 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4923
4924 /*
4925 * Evaluate if a new event needs to be injected.
4926 * An event that's already pending has already performed all necessary checks.
4927 */
4928 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4929 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
4930 {
4931 /** @todo SMI. SMIs take priority over NMIs. */
4932
4933 /*
4934 * NMIs.
4935 * NMIs take priority over external interrupts.
4936 */
4937#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4938 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4939#endif
4940 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4941 {
4942 /*
4943 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4944 *
4945 * For a nested-guest, the FF always indicates the outer guest's ability to
4946 * receive an NMI while the guest-interruptibility state bit depends on whether
4947 * the nested-hypervisor is using virtual-NMIs.
4948 */
4949 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
4950 {
4951#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4952 if ( fIsNestedGuest
4953 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4954 return IEMExecVmxVmexitXcptNmi(pVCpu);
4955#endif
4956 vmxHCSetPendingXcptNmi(pVCpu);
4957 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4958 Log4Func(("NMI pending injection\n"));
4959
4960 /* We've injected the NMI, bail. */
4961 return VINF_SUCCESS;
4962 }
4963 else if (!fIsNestedGuest)
4964 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4965 }
4966
4967 /*
4968 * External interrupts (PIC/APIC).
4969 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4970 * We cannot re-request the interrupt from the controller.
4971 */
4972 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4973 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4974 {
4975 Assert(!DBGFIsStepping(pVCpu));
4976 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4977 AssertRC(rc);
4978
4979 /*
4980 * We must not check EFLAGS directly when executing a nested-guest, use
4981 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
4982 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4983 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4984 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4985 *
4986 * See Intel spec. 25.4.1 "Event Blocking".
4987 */
4988 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4989 {
4990#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4991 if ( fIsNestedGuest
4992 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4993 {
4994 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4995 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4996 return rcStrict;
4997 }
4998#endif
4999 uint8_t u8Interrupt;
5000 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
5001 if (RT_SUCCESS(rc))
5002 {
5003#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5004 if ( fIsNestedGuest
5005 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
5006 {
5007 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
5008 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5009 return rcStrict;
5010 }
5011#endif
5012 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
5013 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
5014 }
5015 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
5016 {
5017 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
5018
5019 if ( !fIsNestedGuest
5020 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
5021 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
5022 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
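                /* The upper nibble of the vector is its APIC priority class; using it as the
                   TPR threshold yields a TPR-below-threshold VM-exit once the guest lowers its
                   TPR far enough for this interrupt to become deliverable. */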
5023
5024 /*
5025 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
5026 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
5027 * need to re-set this force-flag here.
5028 */
5029 }
5030 else
5031 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
5032
5033 /* We've injected the interrupt or taken necessary action, bail. */
5034 return VINF_SUCCESS;
5035 }
5036 if (!fIsNestedGuest)
5037 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
5038 }
5039 }
5040 else if (!fIsNestedGuest)
5041 {
5042 /*
5043 * An event is being injected or we are in an interrupt shadow. Check if another event is
5044 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
5045 * the pending event.
5046 */
5047 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
5048 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
5049 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
5050 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5051 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
5052 }
5053 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
5054
5055 return VINF_SUCCESS;
5056}
5057
5058
5059/**
5060 * Injects any pending events into the guest if the guest is in a state to
5061 * receive them.
5062 *
5063 * @returns Strict VBox status code (i.e. informational status codes too).
5064 * @param pVCpu The cross context virtual CPU structure.
5065 * @param pVmcsInfo The VMCS information structure.
5066 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
5067 * @param fIntrState The VT-x guest-interruptibility state.
5068 * @param fStepping Whether we are single-stepping the guest using the
5069 * hypervisor debugger and should return
5070 * VINF_EM_DBG_STEPPED if the event was dispatched
5071 * directly.
5072 */
5073static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
5074 uint32_t fIntrState, bool fStepping)
5075{
5076 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
5077#ifndef IN_NEM_DARWIN
5078 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5079#endif
5080
5081#ifdef VBOX_STRICT
5082 /*
5083 * Verify guest-interruptibility state.
5084 *
5085 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
5086 * since injecting an event may modify the interruptibility state and we must thus always
5087 * use fIntrState.
5088 */
5089 {
5090 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
5091 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
5092 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
5093 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5094 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
5095 Assert(!TRPMHasTrap(pVCpu));
5096 NOREF(fBlockMovSS); NOREF(fBlockSti);
5097 }
5098#endif
5099
5100 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5101 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5102 {
5103 /*
5104 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
5105 * pending even while injecting an event and in this case, we want a VM-exit as soon as
5106 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
5107 *
5108 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5109 */
5110 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
5111#ifdef VBOX_STRICT
5112 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5113 {
5114 Assert(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
5115 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5116 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5117 }
5118 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5119 {
5120 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
5121 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5122 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5123 }
5124#endif
5125 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5126 uIntType));
5127
5128 /*
5129 * Inject the event and get any changes to the guest-interruptibility state.
5130 *
5131 * The guest-interruptibility state may need to be updated if we inject the event
5132 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
5133 */
5134 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
5135 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
5136
5137 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5138 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
5139 else
5140 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
5141 }
5142
5143 /*
5144 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
5145 * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
5146 */
5147 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5148 && !fIsNestedGuest)
5149 {
5150 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
5151
5152 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5153 {
5154 /*
5155 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
5156 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
5157 */
5158 Assert(!DBGFIsStepping(pVCpu));
5159 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_TF);
5160 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
5161 AssertRC(rc);
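            /* BS is bit 14 of the pending debug exceptions field; seeding it from EFLAGS.TF
               makes the CPU raise the single-step #DB once the STI/MOV SS shadow has passed. */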
5162 }
5163 else
5164 {
5165 /*
5166 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
5167 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
5168 * we take care of this case in vmxHCExportSharedDebugState, as well as the case
5169 * where we use MTF, so just make sure it's called before executing guest code.
5170 */
5171 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
5172 }
5173 }
5174 /* else: for nested-guests, this is currently handled while merging VMCS controls. */
5175
5176 /*
5177 * Finally, update the guest-interruptibility state.
5178 *
5179 * This is required for the real-on-v86 software interrupt injection, for
5180 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
5181 */
5182 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
5183 AssertRC(rc);
5184
5185 /*
5186 * There's no need to clear the VM-entry interruption-information field here if we're not
5187 * injecting anything. VT-x clears the valid bit on every VM-exit.
5188 *
5189 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
5190 */
5191
5192 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
5193 return rcStrict;
5194}
5195
5196
5197/**
5198 * Tries to determine what part of the guest-state VT-x has deemed as invalid
5199 * and update error record fields accordingly.
5200 *
5201 * @returns VMX_IGS_* error codes.
5202 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
5203 * wrong with the guest state.
5204 *
5205 * @param pVCpu The cross context virtual CPU structure.
5206 * @param pVmcsInfo The VMCS info. object.
5207 *
5208 * @remarks This function assumes our cache of the VMCS controls
5209 * is valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
5210 */
5211static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
5212{
5213#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
5214#define HMVMX_CHECK_BREAK(expr, err) do { \
5215 if (!(expr)) { uError = (err); break; } \
5216 } while (0)
5217
5218 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5219 uint32_t uError = VMX_IGS_ERROR;
5220 uint32_t u32IntrState = 0;
5221#ifndef IN_NEM_DARWIN
5222 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5223 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
5224#else
5225 bool const fUnrestrictedGuest = true;
5226#endif
5227 do
5228 {
5229 int rc;
5230
5231 /*
5232 * Guest-interruptibility state.
5233 *
5234 * Read this first so that even when a check fails before the ones that actually
5235 * require the guest-interruptibility state, the recorded value still reflects the
5236 * correct VMCS contents and avoids causing further confusion.
5237 */
5238 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
5239 AssertRC(rc);
5240
5241 uint32_t u32Val;
5242 uint64_t u64Val;
5243
5244 /*
5245 * CR0.
5246 */
5247 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5248 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
5249 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
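        /* Per the VMX spec, CR0 bits set in CR0_FIXED0 must be 1 and bits clear in CR0_FIXED1
           must be 0; fSetCr0 and fZapCr0 are thus the must-be-one and may-be-one masks. */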
5250 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
5251 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
5252 if (fUnrestrictedGuest)
5253 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
5254
5255 uint64_t u64GuestCr0;
5256 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
5257 AssertRC(rc);
5258 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
5259 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
5260 if ( !fUnrestrictedGuest
5261 && (u64GuestCr0 & X86_CR0_PG)
5262 && !(u64GuestCr0 & X86_CR0_PE))
5263 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
5264
5265 /*
5266 * CR4.
5267 */
5268 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5269 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
5270 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
5271
5272 uint64_t u64GuestCr4;
5273 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
5274 AssertRC(rc);
5275 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
5276 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
5277
5278 /*
5279 * IA32_DEBUGCTL MSR.
5280 */
5281 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
5282 AssertRC(rc);
5283 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5284 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
5285 {
5286 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
5287 }
5288 uint64_t u64DebugCtlMsr = u64Val;
5289
5290#ifdef VBOX_STRICT
5291 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
5292 AssertRC(rc);
5293 Assert(u32Val == pVmcsInfo->u32EntryCtls);
5294#endif
5295 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5296
5297 /*
5298 * RIP and RFLAGS.
5299 */
5300 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
5301 AssertRC(rc);
5302 /* pCtx->rip can be different from the value in the VMCS (e.g. after running guest code and VM-exits that don't update it). */
5303 if ( !fLongModeGuest
5304 || !pCtx->cs.Attr.n.u1Long)
5305 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
5306 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
5307 * must be identical if the "IA-32e mode guest" VM-entry
5308 * control is 1 and CS.L is 1. No check applies if the
5309 * CPU supports 64 linear-address bits. */
5310
5311 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
5312 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
5313 AssertRC(rc);
5314 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
5315 VMX_IGS_RFLAGS_RESERVED);
5316 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
5317 uint32_t const u32Eflags = u64Val;
5318
5319 if ( fLongModeGuest
5320 || ( fUnrestrictedGuest
5321 && !(u64GuestCr0 & X86_CR0_PE)))
5322 {
5323 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
5324 }
5325
5326 uint32_t u32EntryInfo;
5327 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
5328 AssertRC(rc);
5329 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5330 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
5331
5332 /*
5333 * 64-bit checks.
5334 */
5335 if (fLongModeGuest)
5336 {
5337 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
5338 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
5339 }
5340
5341 if ( !fLongModeGuest
5342 && (u64GuestCr4 & X86_CR4_PCIDE))
5343 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
5344
5345 /** @todo CR3 field must be such that bits 63:52 and bits in the range
5346 * 51:32 beyond the processor's physical-address width are 0. */
5347
5348 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5349 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
5350 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
5351
5352#ifndef IN_NEM_DARWIN
5353 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
5354 AssertRC(rc);
5355 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
5356
5357 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
5358 AssertRC(rc);
5359 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
5360#endif
5361
5362 /*
5363 * PERF_GLOBAL MSR.
5364 */
5365 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
5366 {
5367 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
5368 AssertRC(rc);
5369 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
5370 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
5371 }
5372
5373 /*
5374 * PAT MSR.
5375 */
5376 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5377 {
5378 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
5379 AssertRC(rc);
5380 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xf8f8f8f8f8f8f8f8)), VMX_IGS_PAT_MSR_RESERVED); /* Bits 7:3 of each PAT entry MBZ. */
5381 for (unsigned i = 0; i < 8; i++)
5382 {
5383 uint8_t u8Val = (u64Val & 0xff);
5384 if ( u8Val != 0 /* UC */
5385 && u8Val != 1 /* WC */
5386 && u8Val != 4 /* WT */
5387 && u8Val != 5 /* WP */
5388 && u8Val != 6 /* WB */
5389 && u8Val != 7 /* UC- */)
5390 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
5391 u64Val >>= 8;
5392 }
5393 }
5394
5395 /*
5396 * EFER MSR.
5397 */
5398 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5399 {
5400 Assert(g_fHmVmxSupportsVmcsEfer);
5401 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
5402 AssertRC(rc);
5403 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
5404 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
5405 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
5406 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
5407 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
5408 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
5409 * iemVmxVmentryCheckGuestState(). */
5410 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5411 || !(u64GuestCr0 & X86_CR0_PG)
5412 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
5413 VMX_IGS_EFER_LMA_LME_MISMATCH);
5414 }
5415
5416 /*
5417 * Segment registers.
5418 */
5419 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5420 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
5421 if (!(u32Eflags & X86_EFL_VM))
5422 {
5423 /* CS */
5424 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
5425 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
5426 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
5427 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5428 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5429 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
5430 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5431 /* CS cannot be loaded with NULL in protected mode. */
5432 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
5433 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
5434 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5435 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
5436 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5437 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
5438 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
5439 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
5440 else
5441 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
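            /* Types 9/11 are non-conforming code (accessed), 13/15 conforming code (accessed);
               type 3 is a read/write accessed data segment, legal for CS only with
               unrestricted guest execution. */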
5442
5443 /* SS */
5444 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5445 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
5446 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
5447 if ( !(pCtx->cr0 & X86_CR0_PE)
5448 || pCtx->cs.Attr.n.u4Type == 3)
5449 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
5450
5451 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5452 {
5453 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
5454 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
5455 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
5456 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
5457 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5458 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5459 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
5460 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5461 }
5462
5463 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
5464 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5465 {
5466 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
5467 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
5468 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5469 || pCtx->ds.Attr.n.u4Type > 11
5470 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5471 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
5472 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
5473 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5474 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5475 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
5476 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5477 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5478 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
5479 }
5480 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5481 {
5482 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
5483 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
5484 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5485 || pCtx->es.Attr.n.u4Type > 11
5486 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5487 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5488 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5489 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5490 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5491 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5492 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5493 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5494 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5495 }
5496 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5497 {
5498 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5499 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5500 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5501 || pCtx->fs.Attr.n.u4Type > 11
5502 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5503 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5504 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5505 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5506 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5507 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5508 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5509 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5510 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5511 }
5512 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5513 {
5514 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5515 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5516 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5517 || pCtx->gs.Attr.n.u4Type > 11
5518 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5519 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5520 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5521 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5522 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5523 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5524 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5525 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5526 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5527 }
5528 /* 64-bit capable CPUs. */
5529 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5530 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5531 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5532 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5533 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5534 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5535 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5536 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5537 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5538 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5539 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5540 }
5541 else
5542 {
5543 /* V86 mode checks. */
5544 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5545 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5546 {
5547 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5548 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5549 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
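                /* 0xf3 = present, DPL=3, read/write accessed data segment, i.e. the attributes
                   all segments must have in virtual-8086 mode. */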
5550 }
5551 else
5552 {
5553 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5554 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5555 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5556 }
5557
5558 /* CS */
5559 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5560 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5561 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5562 /* SS */
5563 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5564 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5565 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5566 /* DS */
5567 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5568 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5569 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5570 /* ES */
5571 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5572 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5573 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5574 /* FS */
5575 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5576 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5577 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5578 /* GS */
5579 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5580 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5581 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5582 /* 64-bit capable CPUs. */
5583 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5584 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5585 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5586 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5587 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5588 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5589 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5590 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5591 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5592 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5593 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5594 }
5595
5596 /*
5597 * TR.
5598 */
5599 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5600 /* 64-bit capable CPUs. */
5601 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5602 if (fLongModeGuest)
5603 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5604 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5605 else
5606 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5607 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5608 VMX_IGS_TR_ATTR_TYPE_INVALID);
5609 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5610 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5611 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5612 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5613 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5614 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5615 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5616 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5617
5618 /*
5619 * GDTR and IDTR (64-bit capable checks).
5620 */
5621 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5622 AssertRC(rc);
5623 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5624
5625 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5626 AssertRC(rc);
5627 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5628
5629 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5630 AssertRC(rc);
5631 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5632
5633 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5634 AssertRC(rc);
5635 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5636
5637 /*
5638 * Guest Non-Register State.
5639 */
5640 /* Activity State. */
5641 uint32_t u32ActivityState;
5642 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5643 AssertRC(rc);
5644 HMVMX_CHECK_BREAK( !u32ActivityState
5645 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5646 VMX_IGS_ACTIVITY_STATE_INVALID);
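        /* The IA32_VMX_MISC MSR advertises which non-active activity states (HLT, shutdown,
           wait-for-SIPI) the CPU supports; anything outside that set is invalid here. */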
5647 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5648 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5649
5650 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5651 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5652 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5653
5654 /** @todo Activity state and injecting interrupts. Left as a todo since we
5655 * currently don't use activity states but ACTIVE. */
5656
5657 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5658 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5659
5660 /* Guest interruptibility-state. */
5661 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5662 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5663 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5664 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5665 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5666 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5667 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5668 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5669 {
5670 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5671 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5672 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5673 }
5674 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5675 {
5676 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5677 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5678 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5679 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5680 }
5681 /** @todo Assumes the processor is not in SMM. */
5682 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5683 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5684 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5685 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5686 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5687 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5688 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5689 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5690
5691 /* Pending debug exceptions. */
5692 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5693 AssertRC(rc);
5694 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5695 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5696 u32Val = u64Val; /* For pending debug exceptions checks below. */
5697
5698 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5699 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5700 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5701 {
5702 if ( (u32Eflags & X86_EFL_TF)
5703 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5704 {
5705 /* Bit 14 is PendingDebug.BS. */
5706 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5707 }
5708 if ( !(u32Eflags & X86_EFL_TF)
5709 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5710 {
5711 /* Bit 14 is PendingDebug.BS. */
5712 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5713 }
5714 }
5715
5716#ifndef IN_NEM_DARWIN
5717 /* VMCS link pointer. */
5718 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5719 AssertRC(rc);
5720 if (u64Val != UINT64_C(0xffffffffffffffff))
5721 {
5722 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5723 /** @todo Bits beyond the processor's physical-address width MBZ. */
5724 /** @todo SMM checks. */
5725 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5726 Assert(pVmcsInfo->pvShadowVmcs);
5727 VMXVMCSREVID VmcsRevId;
5728 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5729 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5730 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5731 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5732 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5733 }
5734
5735 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5736 * not using nested paging? */
5737 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5738 && !fLongModeGuest
5739 && CPUMIsGuestInPAEModeEx(pCtx))
5740 {
5741 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5742 AssertRC(rc);
5743 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5744
5745 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5746 AssertRC(rc);
5747 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5748
5749 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5750 AssertRC(rc);
5751 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5752
5753 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5754 AssertRC(rc);
5755 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5756 }
5757#endif
5758
5759 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5760 if (uError == VMX_IGS_ERROR)
5761 uError = VMX_IGS_REASON_NOT_FOUND;
5762 } while (0);
5763
5764 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5765 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5766 return uError;
5767
5768#undef HMVMX_ERROR_BREAK
5769#undef HMVMX_CHECK_BREAK
5770}
5771
5772
5773#ifndef HMVMX_USE_FUNCTION_TABLE
5774/**
5775 * Handles a guest VM-exit from hardware-assisted VMX execution.
5776 *
5777 * @returns Strict VBox status code (i.e. informational status codes too).
5778 * @param pVCpu The cross context virtual CPU structure.
5779 * @param pVmxTransient The VMX-transient structure.
5780 */
5781DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5782{
5783#ifdef DEBUG_ramshankar
5784# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5785 do { \
5786 if (a_fSave != 0) \
5787 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5788 VBOXSTRICTRC rcStrict = a_CallExpr; \
5789 if (a_fSave != 0) \
5790 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5791 return rcStrict; \
5792 } while (0)
5793#else
5794# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5795#endif
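/* Note: in the DEBUG_ramshankar variant the macro above imports the complete guest state
   before invoking the exit handler (when a_fSave is non-zero) and marks the entire context
   as changed afterwards; in regular builds it simply returns the handler's result. */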
5796 uint32_t const uExitReason = pVmxTransient->uExitReason;
5797 switch (uExitReason)
5798 {
5799 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5800 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5801 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5802 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5803 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5804 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5805 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5806 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5807 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5808 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5809 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5810 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5811 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5812 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5813 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5814 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5815 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5816 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5817 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5818 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5819 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5820 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5821 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5822 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5823 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5824 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5825 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5826 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5827 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5828 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5829#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5830 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5831 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5832 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5833 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5834 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5835         case VMX_EXIT_VMRESUME:            VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5836         case VMX_EXIT_VMWRITE:             VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5837 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5838 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5839 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5840#else
5841 case VMX_EXIT_VMCLEAR:
5842 case VMX_EXIT_VMLAUNCH:
5843 case VMX_EXIT_VMPTRLD:
5844 case VMX_EXIT_VMPTRST:
5845 case VMX_EXIT_VMREAD:
5846 case VMX_EXIT_VMRESUME:
5847 case VMX_EXIT_VMWRITE:
5848 case VMX_EXIT_VMXOFF:
5849 case VMX_EXIT_VMXON:
5850 case VMX_EXIT_INVVPID:
5851 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5852#endif
5853#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5854 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5855#else
5856 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5857#endif
5858
5859 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5860 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5861 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5862
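        /* The following exit reasons are not handled individually here; they all funnel
           into vmxHCExitErrUnexpected() below. */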
5863 case VMX_EXIT_INIT_SIGNAL:
5864 case VMX_EXIT_SIPI:
5865 case VMX_EXIT_IO_SMI:
5866 case VMX_EXIT_SMI:
5867 case VMX_EXIT_ERR_MSR_LOAD:
5868 case VMX_EXIT_ERR_MACHINE_CHECK:
5869 case VMX_EXIT_PML_FULL:
5870 case VMX_EXIT_VIRTUALIZED_EOI:
5871 case VMX_EXIT_GDTR_IDTR_ACCESS:
5872 case VMX_EXIT_LDTR_TR_ACCESS:
5873 case VMX_EXIT_APIC_WRITE:
5874 case VMX_EXIT_RDRAND:
5875 case VMX_EXIT_RSM:
5876 case VMX_EXIT_VMFUNC:
5877 case VMX_EXIT_ENCLS:
5878 case VMX_EXIT_RDSEED:
5879 case VMX_EXIT_XSAVES:
5880 case VMX_EXIT_XRSTORS:
5881 case VMX_EXIT_UMWAIT:
5882 case VMX_EXIT_TPAUSE:
5883 case VMX_EXIT_LOADIWKEY:
5884 default:
5885 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5886 }
5887#undef VMEXIT_CALL_RET
5888}
5889#endif /* !HMVMX_USE_FUNCTION_TABLE */
5890
5891
5892#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5893/**
5894 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5895 *
5896 * @returns Strict VBox status code (i.e. informational status codes too).
5897 * @param pVCpu The cross context virtual CPU structure.
5898 * @param pVmxTransient The VMX-transient structure.
5899 */
5900DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5901{
5902 uint32_t const uExitReason = pVmxTransient->uExitReason;
5903 switch (uExitReason)
5904 {
5905# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5906 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient);
5907 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolationNested(pVCpu, pVmxTransient);
5908# else
5909 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
5910 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
5911# endif
5912 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5913 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5914 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5915
5916 /*
5917 * We shouldn't direct host physical interrupts to the nested-guest.
5918 */
5919 case VMX_EXIT_EXT_INT:
5920 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5921
5922 /*
5923          * Instructions that cause VM-exits unconditionally, or whose exit condition is
5924          * determined solely by the nested hypervisor (meaning that if the VM-exit
5925          * happens, it's guaranteed to be a nested-guest VM-exit).
5926 *
5927 * - Provides VM-exit instruction length ONLY.
5928 */
5929 case VMX_EXIT_CPUID: /* Unconditional. */
5930 case VMX_EXIT_VMCALL:
5931 case VMX_EXIT_GETSEC:
5932 case VMX_EXIT_INVD:
5933 case VMX_EXIT_XSETBV:
5934 case VMX_EXIT_VMLAUNCH:
5935 case VMX_EXIT_VMRESUME:
5936 case VMX_EXIT_VMXOFF:
5937 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5938 case VMX_EXIT_VMFUNC:
5939 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5940
5941 /*
5942          * Instructions that cause VM-exits unconditionally, or whose exit condition is
5943          * determined solely by the nested hypervisor (meaning that if the VM-exit
5944          * happens, it's guaranteed to be a nested-guest VM-exit).
5945 *
5946 * - Provides VM-exit instruction length.
5947 * - Provides VM-exit information.
5948 * - Optionally provides Exit qualification.
5949 *
5950 * Since Exit qualification is 0 for all VM-exits where it is not
5951 * applicable, reading and passing it to the guest should produce
5952 * defined behavior.
5953 *
5954 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5955 */
5956 case VMX_EXIT_INVEPT: /* Unconditional. */
5957 case VMX_EXIT_INVVPID:
5958 case VMX_EXIT_VMCLEAR:
5959 case VMX_EXIT_VMPTRLD:
5960 case VMX_EXIT_VMPTRST:
5961 case VMX_EXIT_VMXON:
5962 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5963 case VMX_EXIT_LDTR_TR_ACCESS:
5964 case VMX_EXIT_RDRAND:
5965 case VMX_EXIT_RDSEED:
5966 case VMX_EXIT_XSAVES:
5967 case VMX_EXIT_XRSTORS:
5968 case VMX_EXIT_UMWAIT:
5969 case VMX_EXIT_TPAUSE:
5970 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5971
5972 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5973 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5974 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5975 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5976 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
5977 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
5978 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
5979 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
5980 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
5981 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
5982 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
5983 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
5984 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
5985 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
5986 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
5987 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
5988 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
5989 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
5990 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
5991
5992 case VMX_EXIT_PREEMPT_TIMER:
5993 {
5994 /** @todo NSTVMX: Preempt timer. */
5995 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
5996 }
5997
5998 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
5999 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
6000
6001 case VMX_EXIT_VMREAD:
6002 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
6003
6004 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
6005 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
6006
6007 case VMX_EXIT_INIT_SIGNAL:
6008 case VMX_EXIT_SIPI:
6009 case VMX_EXIT_IO_SMI:
6010 case VMX_EXIT_SMI:
6011 case VMX_EXIT_ERR_MSR_LOAD:
6012 case VMX_EXIT_ERR_MACHINE_CHECK:
6013 case VMX_EXIT_PML_FULL:
6014 case VMX_EXIT_RSM:
6015 default:
6016 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
6017 }
6018}
6019#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6020
6021
6022/** @name VM-exit helpers.
6023 * @{
6024 */
6025/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6026/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6027/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6028
6029 /** Macro for handling VM-exits that are not expected to occur. */
6030#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
6031 do { \
6032 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
6033 return VERR_VMX_UNEXPECTED_EXIT; \
6034 } while (0)
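/* Records the unexpected exit reason in the per-VCPU HM error field and bails out with
   VERR_VMX_UNEXPECTED_EXIT so the caller can surface the failure. */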
6035
6036#ifdef VBOX_STRICT
6037# ifndef IN_NEM_DARWIN
6038 /* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
6039# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
6040 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6041
6042# define HMVMX_ASSERT_PREEMPT_CPUID() \
6043 do { \
6044 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6045 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6046 } while (0)
6047
6048# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6049 do { \
6050 AssertPtr((a_pVCpu)); \
6051 AssertPtr((a_pVmxTransient)); \
6052 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6053 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6054 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6055 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6056 Assert((a_pVmxTransient)->pVmcsInfo); \
6057 Assert(ASMIntAreEnabled()); \
6058 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6059 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
6060 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6061 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6062 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
6063 HMVMX_ASSERT_PREEMPT_CPUID(); \
6064 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6065 } while (0)
6066# else
6067# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
6068# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
6069# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6070 do { \
6071 AssertPtr((a_pVCpu)); \
6072 AssertPtr((a_pVmxTransient)); \
6073 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6074 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6075 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6076 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6077 Assert((a_pVmxTransient)->pVmcsInfo); \
6078 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6079 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6080 } while (0)
6081# endif
6082
6083# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6084 do { \
6085 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
6086 Assert((a_pVmxTransient)->fIsNestedGuest); \
6087 } while (0)
6088
6089# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6090 do { \
6091 Log4Func(("\n")); \
6092 } while (0)
6093#else
6094# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6095 do { \
6096 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6097 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
6098 } while (0)
6099
6100# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6101 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
6102
6103# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
6104#endif
6105
6106#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6107 /** Macro that performs the privilege checks required for VM-exits caused by a guest
6108  *  attempting to execute a VMX instruction, returning early if an exception was made pending. */
6109# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
6110 do \
6111 { \
6112 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
6113 if (rcStrictTmp == VINF_SUCCESS) \
6114 { /* likely */ } \
6115 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6116 { \
6117 Assert((a_pVCpu)->hm.s.Event.fPending); \
6118 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
6119 return VINF_SUCCESS; \
6120 } \
6121 else \
6122 { \
6123 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
6124 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
6125 } \
6126 } while (0)
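/* Typically invoked at the start of a VMX-instruction exit handler, e.g. (assumed usage):
       HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason); */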
6127
6128 /** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
6129# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
6130 do \
6131 { \
6132 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
6133 (a_pGCPtrEffAddr)); \
6134 if (rcStrictTmp == VINF_SUCCESS) \
6135 { /* likely */ } \
6136 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6137 { \
6138 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
6139 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
6140 NOREF(uXcptTmp); \
6141 return VINF_SUCCESS; \
6142 } \
6143 else \
6144 { \
6145 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
6146 return rcStrictTmp; \
6147 } \
6148 } while (0)
6149#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6150
6151
6152/**
6153 * Advances the guest RIP by the specified number of bytes.
6154 *
6155 * @param pVCpu The cross context virtual CPU structure.
6156 * @param cbInstr Number of bytes to advance the RIP by.
6157 *
6158 * @remarks No-long-jump zone!!!
6159 */
6160DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
6161{
6162 /* Advance the RIP. */
6163 pVCpu->cpum.GstCtx.rip += cbInstr;
6164 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
6165
6166 /* Update interrupt inhibition. */
6167 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
6168 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
6169 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6170}
6171
6172
6173/**
6174 * Advances the guest RIP after reading it from the VMCS.
6175 *
6176 * @returns VBox status code, no informational status codes.
6177 * @param pVCpu The cross context virtual CPU structure.
6178 * @param pVmxTransient The VMX-transient structure.
6179 *
6180 * @remarks No-long-jump zone!!!
6181 */
6182static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6183{
6184 vmxHCReadToTransientSlow<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
6185 /** @todo consider template here after checking callers. */
6186 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
6187 AssertRCReturn(rc, rc);
6188
6189 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
6190 return VINF_SUCCESS;
6191}
6192
6193
6194/**
6195  * Handles a condition that occurred while delivering an event through the guest or
6196 * nested-guest IDT.
6197 *
6198 * @returns Strict VBox status code (i.e. informational status codes too).
6199 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6200 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
6201  *          to continue execution of the guest, which will deliver the \#DF.
6202 * @retval VINF_EM_RESET if we detected a triple-fault condition.
6203 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
6204 *
6205 * @param pVCpu The cross context virtual CPU structure.
6206 * @param pVmxTransient The VMX-transient structure.
6207 *
6208 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6209 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
6210 * is due to an EPT violation, PML full or SPP-related event.
6211 *
6212 * @remarks No-long-jump zone!!!
6213 */
6214static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6215{
6216 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6217 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6218 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6219 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6220 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6221 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
6222
6223 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
6224 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6225 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
6226 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
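    /* The IDT-vectoring information is valid only when this VM-exit interrupted the delivery
       of an event through the guest IDT; the else-path further below then only needs to deal
       with NMI unblocking due to IRET. */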
6227 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
6228 {
6229 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
6230 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
6231
6232 /*
6233 * If the event was a software interrupt (generated with INT n) or a software exception
6234 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
6235 * can handle the VM-exit and continue guest execution which will re-execute the
6236 * instruction rather than re-injecting the exception, as that can cause premature
6237 * trips to ring-3 before injection and involve TRPM which currently has no way of
6238 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
6239 * the problem).
6240 */
6241 IEMXCPTRAISE enmRaise;
6242 IEMXCPTRAISEINFO fRaiseInfo;
6243 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6244 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6245 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6246 {
6247 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
6248 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6249 }
6250 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
6251 {
6252 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
6253 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
6254 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
6255
6256 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
6257 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
6258
6259 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
6260
6261 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
6262 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
6263 {
6264 pVmxTransient->fVectoringPF = true;
6265 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6266 }
6267 }
6268 else
6269 {
6270 /*
6271 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
6272 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
6273 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
6274 */
6275 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6276 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6277 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
6278 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6279 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6280 }
6281
6282 /*
6283 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
6284 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
6285 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
6286 * subsequent VM-entry would fail, see @bugref{7445}.
6287 *
6288 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
6289 */
6290 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6291 && enmRaise == IEMXCPTRAISE_PREV_EVENT
6292 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6293 && CPUMIsGuestNmiBlocking(pVCpu))
6294 {
6295 CPUMSetGuestNmiBlocking(pVCpu, false);
6296 }
6297
6298 switch (enmRaise)
6299 {
6300 case IEMXCPTRAISE_CURRENT_XCPT:
6301 {
6302 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
6303 Assert(rcStrict == VINF_SUCCESS);
6304 break;
6305 }
6306
6307 case IEMXCPTRAISE_PREV_EVENT:
6308 {
6309 uint32_t u32ErrCode;
6310 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
6311 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
6312 else
6313 u32ErrCode = 0;
6314
6315 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
6316 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
6317 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
6318 pVCpu->cpum.GstCtx.cr2);
6319
6320 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6321 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
6322 Assert(rcStrict == VINF_SUCCESS);
6323 break;
6324 }
6325
6326 case IEMXCPTRAISE_REEXEC_INSTR:
6327 Assert(rcStrict == VINF_SUCCESS);
6328 break;
6329
6330 case IEMXCPTRAISE_DOUBLE_FAULT:
6331 {
6332 /*
6333 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
6334 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
6335 */
6336 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6337 {
6338 pVmxTransient->fVectoringDoublePF = true;
6339 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6340 pVCpu->cpum.GstCtx.cr2));
6341 rcStrict = VINF_SUCCESS;
6342 }
6343 else
6344 {
6345 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
6346 vmxHCSetPendingXcptDF(pVCpu);
6347 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6348 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6349 rcStrict = VINF_HM_DOUBLE_FAULT;
6350 }
6351 break;
6352 }
6353
6354 case IEMXCPTRAISE_TRIPLE_FAULT:
6355 {
6356 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
6357 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6358 rcStrict = VINF_EM_RESET;
6359 break;
6360 }
6361
6362 case IEMXCPTRAISE_CPU_HANG:
6363 {
6364 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
6365 rcStrict = VERR_EM_GUEST_CPU_HANG;
6366 break;
6367 }
6368
6369 default:
6370 {
6371 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
6372 rcStrict = VERR_VMX_IPE_2;
6373 break;
6374 }
6375 }
6376 }
6377 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6378 && !CPUMIsGuestNmiBlocking(pVCpu))
6379 {
6380 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
6381 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
6382 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
6383 {
6384 /*
6385 * Execution of IRET caused a fault when NMI blocking was in effect (i.e we're in
6386 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6387 * that virtual NMIs remain blocked until the IRET execution is completed.
6388 *
6389 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
6390 */
6391 CPUMSetGuestNmiBlocking(pVCpu, true);
6392 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6393 }
6394 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6395 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6396 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6397 {
6398 /*
6399 * Execution of IRET caused an EPT violation, page-modification log-full event or
6400 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
6401 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6402 * that virtual NMIs remain blocked until the IRET execution is completed.
6403 *
6404 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
6405 */
6406 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
6407 {
6408 CPUMSetGuestNmiBlocking(pVCpu, true);
6409 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6410 }
6411 }
6412 }
6413
6414 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
6415 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
6416 return rcStrict;
6417}
6418
6419
6420#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6421/**
6422 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
6423 * guest attempting to execute a VMX instruction.
6424 *
6425 * @returns Strict VBox status code (i.e. informational status codes too).
6426 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6427 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6428 *
6429 * @param pVCpu The cross context virtual CPU structure.
6430 * @param uExitReason The VM-exit reason.
6431 *
6432 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
6433 * @remarks No-long-jump zone!!!
6434 */
6435static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
6436{
6437 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6438 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6439
6440 /*
6441 * The physical CPU would have already checked the CPU mode/code segment.
6442 * We shall just assert here for paranoia.
6443 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
6444 */
6445 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6446 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6447 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
6448
6449 if (uExitReason == VMX_EXIT_VMXON)
6450 {
6451 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6452
6453 /*
6454 * We check CR4.VMXE because it is required to be always set while in VMX operation
6455 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
6456 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6457 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6458 */
6459 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6460 {
6461 Log4Func(("CR4.VMXE is not set -> #UD\n"));
6462 vmxHCSetPendingXcptUD(pVCpu);
6463 return VINF_HM_PENDING_XCPT;
6464 }
6465 }
6466 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
6467 {
6468 /*
6469 * The guest has not entered VMX operation but attempted to execute a VMX instruction
6470 * (other than VMXON), we need to raise a #UD.
6471 */
6472 Log4Func(("Not in VMX root mode -> #UD\n"));
6473 vmxHCSetPendingXcptUD(pVCpu);
6474 return VINF_HM_PENDING_XCPT;
6475 }
6476
6477 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
6478 return VINF_SUCCESS;
6479}
6480
6481
6482/**
6483 * Decodes the memory operand of an instruction that caused a VM-exit.
6484 *
6485 * The Exit qualification field provides the displacement field for memory
6486 * operand instructions, if any.
6487 *
6488 * @returns Strict VBox status code (i.e. informational status codes too).
6489 * @retval VINF_SUCCESS if the operand was successfully decoded.
6490 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6491 * operand.
6492 * @param pVCpu The cross context virtual CPU structure.
6493 * @param uExitInstrInfo The VM-exit instruction information field.
6494  * @param   GCPtrDisp       The instruction displacement field, if any. For
6495  *                          RIP-relative addressing pass RIP + displacement here.
6496  * @param   enmMemAccess    The memory operand's access type (read or write).
6497 * @param pGCPtrMem Where to store the effective destination memory address.
6498 *
6499 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6500 * virtual-8086 mode hence skips those checks while verifying if the
6501 * segment is valid.
6502 */
6503static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6504 PRTGCPTR pGCPtrMem)
6505{
6506 Assert(pGCPtrMem);
6507 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6508 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6509 | CPUMCTX_EXTRN_CR0);
6510
6511 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6512 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6513 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
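    /* Both tables are indexed by the 2-bit address-size field of the VM-exit instruction
       information (0 = 16-bit, 1 = 32-bit, 2 = 64-bit); the first holds the address masks,
       the second the access sizes (in bytes) used for the limit checks below. */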
6514
6515 VMXEXITINSTRINFO ExitInstrInfo;
6516 ExitInstrInfo.u = uExitInstrInfo;
6517 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6518 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6519 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6520 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6521 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6522 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6523 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6524 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6525 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6526
6527 /*
6528 * Validate instruction information.
6529 * This shouldn't happen on real hardware but useful while testing our nested hardware-virtualization code.
6530 */
6531 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6532 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6533 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6534 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6535 AssertLogRelMsgReturn(fIsMemOperand,
6536 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6537
6538 /*
6539 * Compute the complete effective address.
6540 *
6541 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6542 * See AMD spec. 4.5.2 "Segment Registers".
6543 */
6544 RTGCPTR GCPtrMem = GCPtrDisp;
6545 if (fBaseRegValid)
6546 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6547 if (fIdxRegValid)
6548 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6549
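    /* Apply the segment base: in long mode only the FS and GS bases are applied (the other
       segment bases are treated as zero); outside long mode the selected segment's base is
       always added. The result is then truncated to the instruction's address size. */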
6550 RTGCPTR const GCPtrOff = GCPtrMem;
6551 if ( !fIsLongMode
6552 || iSegReg >= X86_SREG_FS)
6553 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6554 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6555
6556 /*
6557 * Validate effective address.
6558 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6559 */
6560 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6561 Assert(cbAccess > 0);
6562 if (fIsLongMode)
6563 {
6564 if (X86_IS_CANONICAL(GCPtrMem))
6565 {
6566 *pGCPtrMem = GCPtrMem;
6567 return VINF_SUCCESS;
6568 }
6569
6570 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6571 * "Data Limit Checks in 64-bit Mode". */
6572 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6573 vmxHCSetPendingXcptGP(pVCpu, 0);
6574 return VINF_HM_PENDING_XCPT;
6575 }
6576
6577 /*
6578 * This is a watered down version of iemMemApplySegment().
6579 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6580 * and segment CPL/DPL checks are skipped.
6581 */
6582 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6583 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6584 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6585
6586 /* Check if the segment is present and usable. */
6587 if ( pSel->Attr.n.u1Present
6588 && !pSel->Attr.n.u1Unusable)
6589 {
6590 Assert(pSel->Attr.n.u1DescType);
6591 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6592 {
6593 /* Check permissions for the data segment. */
6594 if ( enmMemAccess == VMXMEMACCESS_WRITE
6595 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6596 {
6597 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6598 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6599 return VINF_HM_PENDING_XCPT;
6600 }
6601
6602 /* Check limits if it's a normal data segment. */
6603 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6604 {
6605 if ( GCPtrFirst32 > pSel->u32Limit
6606 || GCPtrLast32 > pSel->u32Limit)
6607 {
6608 Log4Func(("Data segment limit exceeded. "
6609 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6610 GCPtrLast32, pSel->u32Limit));
6611 if (iSegReg == X86_SREG_SS)
6612 vmxHCSetPendingXcptSS(pVCpu, 0);
6613 else
6614 vmxHCSetPendingXcptGP(pVCpu, 0);
6615 return VINF_HM_PENDING_XCPT;
6616 }
6617 }
6618 else
6619 {
6620 /* Check limits if it's an expand-down data segment.
6621 Note! The upper boundary is defined by the B bit, not the G bit! */
6622 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6623 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6624 {
6625 Log4Func(("Expand-down data segment limit exceeded. "
6626 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6627 GCPtrLast32, pSel->u32Limit));
6628 if (iSegReg == X86_SREG_SS)
6629 vmxHCSetPendingXcptSS(pVCpu, 0);
6630 else
6631 vmxHCSetPendingXcptGP(pVCpu, 0);
6632 return VINF_HM_PENDING_XCPT;
6633 }
6634 }
6635 }
6636 else
6637 {
6638 /* Check permissions for the code segment. */
6639 if ( enmMemAccess == VMXMEMACCESS_WRITE
6640 || ( enmMemAccess == VMXMEMACCESS_READ
6641 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6642 {
6643 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6644 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6645 vmxHCSetPendingXcptGP(pVCpu, 0);
6646 return VINF_HM_PENDING_XCPT;
6647 }
6648
6649 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6650 if ( GCPtrFirst32 > pSel->u32Limit
6651 || GCPtrLast32 > pSel->u32Limit)
6652 {
6653 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6654 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6655 if (iSegReg == X86_SREG_SS)
6656 vmxHCSetPendingXcptSS(pVCpu, 0);
6657 else
6658 vmxHCSetPendingXcptGP(pVCpu, 0);
6659 return VINF_HM_PENDING_XCPT;
6660 }
6661 }
6662 }
6663 else
6664 {
6665 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6666 vmxHCSetPendingXcptGP(pVCpu, 0);
6667 return VINF_HM_PENDING_XCPT;
6668 }
6669
6670 *pGCPtrMem = GCPtrMem;
6671 return VINF_SUCCESS;
6672}
6673#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6674
6675
6676/**
6677 * VM-exit helper for LMSW.
6678 */
6679static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6680{
6681 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6682 AssertRCReturn(rc, rc);
6683
6684 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6685 AssertMsg( rcStrict == VINF_SUCCESS
6686 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6687
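    /* IEM may have updated RIP, RFLAGS and CR0, so mark them as changed. If IEM raised an
       exception it is already pending on the VCPU; mark the exception-related state as
       changed too and fold the status back to VINF_SUCCESS. */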
6688 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6689 if (rcStrict == VINF_IEM_RAISED_XCPT)
6690 {
6691 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6692 rcStrict = VINF_SUCCESS;
6693 }
6694
6695 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6696 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6697 return rcStrict;
6698}
6699
6700
6701/**
6702 * VM-exit helper for CLTS.
6703 */
6704static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6705{
6706 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6707 AssertRCReturn(rc, rc);
6708
6709 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6710 AssertMsg( rcStrict == VINF_SUCCESS
6711 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6712
6713 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6714 if (rcStrict == VINF_IEM_RAISED_XCPT)
6715 {
6716 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6717 rcStrict = VINF_SUCCESS;
6718 }
6719
6720 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6721 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6722 return rcStrict;
6723}
6724
6725
6726/**
6727 * VM-exit helper for MOV from CRx (CRx read).
6728 */
6729static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6730{
6731 Assert(iCrReg < 16);
6732 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6733
6734 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6735 AssertRCReturn(rc, rc);
6736
6737 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6738 AssertMsg( rcStrict == VINF_SUCCESS
6739 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6740
6741 if (iGReg == X86_GREG_xSP)
6742 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6743 else
6744 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6745#ifdef VBOX_WITH_STATISTICS
6746 switch (iCrReg)
6747 {
6748 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6749 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6750 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6751 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6752 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6753 }
6754#endif
6755 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6756 return rcStrict;
6757}
6758
6759
6760/**
6761 * VM-exit helper for MOV to CRx (CRx write).
6762 */
6763static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6764{
6765 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6766
6767 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6768 AssertMsg( rcStrict == VINF_SUCCESS
6769 || rcStrict == VINF_IEM_RAISED_XCPT
6770 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6771
6772 switch (iCrReg)
6773 {
6774 case 0:
6775 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6776 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6777 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6778 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6779 break;
6780
6781 case 2:
6782 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6783             /* Nothing to do here, CR2 is not part of the VMCS. */
6784 break;
6785
6786 case 3:
6787 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6788 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6789 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6790 break;
6791
6792 case 4:
6793 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6794 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6795#ifndef IN_NEM_DARWIN
6796 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6797 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6798#else
6799 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6800#endif
6801 break;
6802
6803 case 8:
6804 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6805 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6806 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6807 break;
6808
6809 default:
6810 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6811 break;
6812 }
6813
6814 if (rcStrict == VINF_IEM_RAISED_XCPT)
6815 {
6816 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6817 rcStrict = VINF_SUCCESS;
6818 }
6819 return rcStrict;
6820}
6821
6822
6823/**
6824 * VM-exit exception handler for \#PF (Page-fault exception).
6825 *
6826 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6827 */
6828static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6829{
6830 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6831 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
6832
6833#ifndef IN_NEM_DARWIN
6834 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6835 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6836 { /* likely */ }
6837 else
6838#endif
6839 {
6840#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6841 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6842#endif
6843 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6844 if (!pVmxTransient->fVectoringDoublePF)
6845 {
6846 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6847 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6848 }
6849 else
6850 {
6851 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6852 Assert(!pVmxTransient->fIsNestedGuest);
6853 vmxHCSetPendingXcptDF(pVCpu);
6854 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6855 }
6856 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6857 return VINF_SUCCESS;
6858 }
6859
6860 Assert(!pVmxTransient->fIsNestedGuest);
6861
6862     /* If it's a vectoring #PF, emulate injection of the original event, as PGMTrap0eHandler() is incapable
6863        of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6864 if (pVmxTransient->fVectoringPF)
6865 {
6866 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6867 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6868 }
6869
6870 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6871 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6872 AssertRCReturn(rc, rc);
6873
6874 Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pCtx->cs.Sel, pCtx->rip,
6875 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pCtx->cr3));
6876
6877 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6878 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
6879
6880 Log4Func(("#PF: rc=%Rrc\n", rc));
6881 if (rc == VINF_SUCCESS)
6882 {
6883 /*
6884 * This is typically a shadow page table sync or a MMIO instruction. But we may have
6885 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6886 */
6887 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6888 TRPMResetTrap(pVCpu);
6889 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6890 return rc;
6891 }
6892
6893 if (rc == VINF_EM_RAW_GUEST_TRAP)
6894 {
6895 if (!pVmxTransient->fVectoringDoublePF)
6896 {
6897 /* It's a guest page fault and needs to be reflected to the guest. */
6898 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6899 TRPMResetTrap(pVCpu);
6900 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6901 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6902 uGstErrorCode, pVmxTransient->uExitQual);
6903 }
6904 else
6905 {
6906 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6907 TRPMResetTrap(pVCpu);
6908 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6909 vmxHCSetPendingXcptDF(pVCpu);
6910 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6911 }
6912
6913 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6914 return VINF_SUCCESS;
6915 }
6916
6917 TRPMResetTrap(pVCpu);
6918 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6919 return rc;
6920}
6921
6922
6923/**
6924 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6925 *
6926 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6927 */
6928static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6929{
6930 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6931 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6932
6933 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR0>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6934 AssertRCReturn(rc, rc);
6935
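    /* With CR0.NE clear the guest expects legacy external FPU error reporting (FERR# routed
       as IRQ 13) rather than a #MF exception; mirror that behaviour below. */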
6936 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6937 {
6938 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
6939 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6940
6941 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6942          *        provides VM-exit instruction length. If this causes problems later,
6943 * disassemble the instruction like it's done on AMD-V. */
6944 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6945 AssertRCReturn(rc2, rc2);
6946 return rc;
6947 }
6948
6949 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6950 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6951 return VINF_SUCCESS;
6952}
6953
6954
6955/**
6956 * VM-exit exception handler for \#BP (Breakpoint exception).
6957 *
6958 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6959 */
6960static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6961{
6962 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6963 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6964
6965 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6966 AssertRCReturn(rc, rc);
6967
6968 VBOXSTRICTRC rcStrict;
6969 if (!pVmxTransient->fIsNestedGuest)
6970 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx));
6971 else
6972 rcStrict = VINF_EM_RAW_GUEST_TRAP;
6973
6974 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6975 {
6976 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6977 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6978 rcStrict = VINF_SUCCESS;
6979 }
6980
6981 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
6982 return rcStrict;
6983}
6984
6985
6986/**
6987 * VM-exit exception handler for \#AC (Alignment-check exception).
6988 *
6989 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6990 */
6991static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6992{
6993 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6994
6995 /*
6996 * Detect #ACs caused by host having enabled split-lock detection.
6997 * Emulate such instructions.
6998 */
6999#define VMX_HC_EXIT_XCPT_AC_INITIAL_REGS (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS)
7000 int rc = vmxHCImportGuestState<VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7001 AssertRCReturn(rc, rc);
7002 /** @todo detect split lock in cpu feature? */
7003 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
7004 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7005 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
7006 || CPUMGetGuestCPL(pVCpu) != 3
7007 /* 3. When the EFLAGS.AC != 0 this can only be a split-lock case. */
7008 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
7009 {
7010 /*
7011 * Check for debug/trace events and import state accordingly.
7012 */
7013 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
7014 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7015 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
7016#ifndef IN_NEM_DARWIN
7017 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
7018#endif
7019 )
7020 {
7021 if (pVM->cCpus == 1)
7022 {
7023#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7024 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK,
7025 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7026#else
7027 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7028 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7029#endif
7030 AssertRCReturn(rc, rc);
7031 }
7032 }
7033 else
7034 {
7035 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7036 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7037 AssertRCReturn(rc, rc);
7038
7039 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
7040
7041 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
7042 {
7043 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
7044 if (rcStrict != VINF_SUCCESS)
7045 return rcStrict;
7046 }
7047 }
7048
7049 /*
7050 * Emulate the instruction.
7051 *
7052 * We have to ignore the LOCK prefix here as we must not retrigger the
7053 * detection on the host. This isn't all that satisfactory, though...
7054 */
7055 if (pVM->cCpus == 1)
7056 {
7057 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
7058 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7059
7060 /** @todo For SMP configs we should do a rendezvous here. */
7061 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
7062 if (rcStrict == VINF_SUCCESS)
7063#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7064 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
7065 HM_CHANGED_GUEST_RIP
7066 | HM_CHANGED_GUEST_RFLAGS
7067 | HM_CHANGED_GUEST_GPRS_MASK
7068 | HM_CHANGED_GUEST_CS
7069 | HM_CHANGED_GUEST_SS);
7070#else
7071 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7072#endif
7073 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7074 {
7075 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7076 rcStrict = VINF_SUCCESS;
7077 }
7078 return rcStrict;
7079 }
7080 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
7081 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7082 return VINF_EM_EMULATE_SPLIT_LOCK;
7083 }
7084
7085 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
7086 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7087 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
7088
7089 /* Re-inject it. We'll detect any nesting before getting here. */
7090 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7091 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7092 return VINF_SUCCESS;
7093}
7094
7095
7096/**
7097 * VM-exit exception handler for \#DB (Debug exception).
7098 *
7099 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7100 */
7101static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7102{
7103 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7104 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
7105
7106 /*
7107 * Get the DR6-like values from the Exit qualification and pass it to DBGF for processing.
7108 */
7109 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
7110
7111 /* See Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
7112 uint64_t const uDR6 = X86_DR6_INIT_VAL
7113 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
7114 | X86_DR6_BD | X86_DR6_BS));
7115
7116 int rc;
7117 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7118 if (!pVmxTransient->fIsNestedGuest)
7119 {
7120 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7121
7122 /*
7123 * Prevents stepping twice over the same instruction when the guest is stepping using
7124 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
7125 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
7126 */
7127 if ( rc == VINF_EM_DBG_STEPPED
7128 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
7129 {
7130 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7131 rc = VINF_EM_RAW_GUEST_TRAP;
7132 }
7133 }
7134 else
7135 rc = VINF_EM_RAW_GUEST_TRAP;
7136 Log6Func(("rc=%Rrc\n", rc));
7137 if (rc == VINF_EM_RAW_GUEST_TRAP)
7138 {
7139 /*
7140 * The exception was for the guest. Update DR6, DR7.GD and
7141 * IA32_DEBUGCTL.LBR before forwarding it.
7142 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
7143 */
7144#ifndef IN_NEM_DARWIN
7145 VMMRZCallRing3Disable(pVCpu);
7146 HM_DISABLE_PREEMPT(pVCpu);
7147
7148 pCtx->dr[6] &= ~X86_DR6_B_MASK;
7149 pCtx->dr[6] |= uDR6;
7150 if (CPUMIsGuestDebugStateActive(pVCpu))
7151 ASMSetDR6(pCtx->dr[6]);
7152
7153 HM_RESTORE_PREEMPT();
7154 VMMRZCallRing3Enable(pVCpu);
7155#else
7156 /** @todo */
7157#endif
7158
7159 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7160 AssertRCReturn(rc, rc);
7161
7162 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
7163 pCtx->dr[7] &= ~(uint64_t)X86_DR7_GD;
7164
7165 /* Paranoia. */
7166 pCtx->dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
7167 pCtx->dr[7] |= X86_DR7_RA1_MASK;
7168
7169 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
7170 AssertRC(rc);
7171
7172 /*
7173 * Raise #DB in the guest.
7174 *
7175 * It is important to reflect exactly what the VM-exit gave us (preserving the
7176 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
7177 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
7178 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
7179 *
7180 * Intel re-documented ICEBP/INT1 in May 2018; it was previously documented only as
7181 * part of the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7182 */
7183 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7184 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7185 return VINF_SUCCESS;
7186 }
7187
7188 /*
7189 * Not a guest trap, must be a hypervisor related debug event then.
7190 * Update DR6 in case someone is interested in it.
7191 */
7192 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
7193 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
7194 CPUMSetHyperDR6(pVCpu, uDR6);
7195
7196 return rc;
7197}
7198
7199
7200/**
7201 * Hacks its way around the lovely mesa driver's backdoor accesses.
7202 *
7203 * @sa hmR0SvmHandleMesaDrvGp.
7204 */
7205static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7206{
7207 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
7208 RT_NOREF(pCtx);
7209
7210 /* For now we'll just skip the instruction. */
7211 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7212}
7213
7214
7215/**
7216 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
7217 * backdoor logging w/o checking what it is running inside.
7218 *
7219 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
7220 * backdoor port and magic numbers loaded in registers.
7221 *
7222 * @returns true if it is, false if it isn't.
7223 * @sa hmR0SvmIsMesaDrvGp.
7224 */
7225DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7226{
7227 /* 0xed: IN eAX,dx */
7228 uint8_t abInstr[1];
7229 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
7230 return false;
7231
7232 /* Check that it is #GP(0). */
7233 if (pVmxTransient->uExitIntErrorCode != 0)
7234 return false;
7235
7236 /* Check magic and port. */
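/* EAX = 0x564d5868 ('VMXh', the VMware backdoor magic) and DX = 0x5658 ('VX', the backdoor I/O port). */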
7237 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
7238 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
7239 if (pCtx->rax != UINT32_C(0x564d5868))
7240 return false;
7241 if (pCtx->dx != UINT32_C(0x5658))
7242 return false;
7243
7244 /* Flat ring-3 CS. */
7245 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
7246 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
7247 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
7248 if (pCtx->cs.Attr.n.u2Dpl != 3)
7249 return false;
7250 if (pCtx->cs.u64Base != 0)
7251 return false;
7252
7253 /* Check opcode. */
7254 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
7255 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
7256 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
7257 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
7258 if (RT_FAILURE(rc))
7259 return false;
7260 if (abInstr[0] != 0xed)
7261 return false;
7262
7263 return true;
7264}
7265
7266
7267/**
7268 * VM-exit exception handler for \#GP (General-protection exception).
7269 *
7270 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7271 */
7272static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7273{
7274 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7275 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
7276
7277 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7278 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7279#ifndef IN_NEM_DARWIN
7280 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
7281 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
7282 { /* likely */ }
7283 else
7284#endif
7285 {
7286#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7287# ifndef IN_NEM_DARWIN
7288 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7289# else
7290 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7291# endif
7292#endif
7293 /*
7294 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
7295 * executing a nested-guest, reflect #GP to the guest or nested-guest.
7296 */
7297 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7298 AssertRCReturn(rc, rc);
7299 Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
7300 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
7301
7302 if ( pVmxTransient->fIsNestedGuest
7303 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
7304 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
7305 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7306 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7307 else
7308 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
7309 return rc;
7310 }
7311
7312#ifndef IN_NEM_DARWIN
7313 Assert(CPUMIsGuestInRealModeEx(pCtx));
7314 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
7315 Assert(!pVmxTransient->fIsNestedGuest);
7316
7317 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7318 AssertRCReturn(rc, rc);
7319
7320 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7321 if (rcStrict == VINF_SUCCESS)
7322 {
7323 if (!CPUMIsGuestInRealModeEx(pCtx))
7324 {
7325 /*
7326 * The guest is no longer in real-mode, check if we can continue executing the
7327 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
7328 */
7329 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
7330 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
7331 {
7332 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
7333 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7334 }
7335 else
7336 {
7337 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
7338 rcStrict = VINF_EM_RESCHEDULE;
7339 }
7340 }
7341 else
7342 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7343 }
7344 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7345 {
7346 rcStrict = VINF_SUCCESS;
7347 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7348 }
7349 return VBOXSTRICTRC_VAL(rcStrict);
7350#endif
7351}
7352
7353
7354/**
7355 * VM-exit exception handler for \#DE (Divide Error).
7356 *
7357 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7358 */
7359static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7360{
7361 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7362 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
7363
7364 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7365 AssertRCReturn(rc, rc);
7366
7367 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7368 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
7369 {
7370 uint8_t cbInstr = 0;
7371 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
7372 if (rc2 == VINF_SUCCESS)
7373 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
7374 else if (rc2 == VERR_NOT_FOUND)
7375 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
7376 else
7377 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
7378 }
7379 else
7380 rcStrict = VINF_SUCCESS; /* Do nothing. */
7381
7382 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
7383 if (RT_FAILURE(rcStrict))
7384 {
7385 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7386 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7387 rcStrict = VINF_SUCCESS;
7388 }
7389
7390 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
7391 return VBOXSTRICTRC_VAL(rcStrict);
7392}
7393
7394
7395/**
7396 * VM-exit exception handler wrapper for all other exceptions that are not handled
7397 * by a specific handler.
7398 *
7399 * This simply re-injects the exception back into the VM without any special
7400 * processing.
7401 *
7402 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7403 */
7404static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7405{
7406 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7407
7408#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7409# ifndef IN_NEM_DARWIN
7410 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7411 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
7412 ("uVector=%#x u32XcptBitmap=%#X32\n",
7413 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
7414 NOREF(pVmcsInfo);
7415# endif
7416#endif
7417
7418 /*
7419 * Re-inject the exception into the guest. This cannot be a double-fault condition which
7420 * would have been handled while checking exits due to event delivery.
7421 */
7422 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7423
7424#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7425 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7426 AssertRCReturn(rc, rc);
7427 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7428#endif
7429
7430#ifdef VBOX_WITH_STATISTICS
7431 switch (uVector)
7432 {
7433 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
7434 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
7435 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
7436 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7437 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
7438 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
7439 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7440 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
7441 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
7442 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
7443 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
7444 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
7445 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
7446 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
7447 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
7448 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
7449 default:
7450 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
7451 break;
7452 }
7453#endif
7454
7455 /* We should never call this function for a page fault; otherwise we'd need to pass on the fault address below. */
7456 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
7457 NOREF(uVector);
7458
7459 /* Re-inject the original exception into the guest. */
7460 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7461 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7462 return VINF_SUCCESS;
7463}
7464
7465
7466/**
7467 * VM-exit exception handler for all exceptions (except NMIs!).
7468 *
7469 * @remarks This may be called for both guests and nested-guests. Take care to not
7470 * make assumptions and avoid doing anything that is not relevant when
7471 * executing a nested-guest (e.g., Mesa driver hacks).
7472 */
7473static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7474{
7475 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
7476
7477 /*
7478 * If this VM-exit occurred while delivering an event through the guest IDT, take
7479 * action based on the return code and additional hints (e.g. for page-faults)
7480 * that will be updated in the VMX transient structure.
7481 */
7482 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
7483 if (rcStrict == VINF_SUCCESS)
7484 {
7485 /*
7486 * If an exception caused a VM-exit due to delivery of an event, the original
7487 * event may have to be re-injected into the guest. We shall reinject it and
7488 * continue guest execution. However, page-fault is a complicated case and
7489 * needs additional processing done in vmxHCExitXcptPF().
7490 */
7491 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7492 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7493 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
7494 || uVector == X86_XCPT_PF)
7495 {
7496 switch (uVector)
7497 {
7498 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7499 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7500 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7501 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7502 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7503 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7504 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7505 default:
7506 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7507 }
7508 }
7509 /* else: inject pending event before resuming guest execution. */
7510 }
7511 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7512 {
7513 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7514 rcStrict = VINF_SUCCESS;
7515 }
7516
7517 return rcStrict;
7518}
7519/** @} */
7520
7521
7522/** @name VM-exit handlers.
7523 * @{
7524 */
7525/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7526/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7527/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7528
7529/**
7530 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7531 */
7532HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7533{
7534 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7535 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7536
7537#ifndef IN_NEM_DARWIN
7538 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7539 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7540 return VINF_SUCCESS;
7541 return VINF_EM_RAW_INTERRUPT;
7542#else
7543 return VINF_SUCCESS;
7544#endif
7545}
7546
7547
7548/**
7549 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7550 * VM-exit.
7551 */
7552HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7553{
7554 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7555 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7556
7557 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
7558
7559 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7560 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7561 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7562
7563 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7564 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7565 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7566 NOREF(pVmcsInfo);
7567
7568 VBOXSTRICTRC rcStrict;
7569 switch (uExitIntType)
7570 {
7571#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7572 /*
7573 * Host physical NMIs:
7574 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7575 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7576 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7577 *
7578 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7579 * See Intel spec. 27.5.5 "Updating Non-Register State".
7580 */
7581 case VMX_EXIT_INT_INFO_TYPE_NMI:
7582 {
7583 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7584 break;
7585 }
7586#endif
7587
7588 /*
7589 * Privileged software exceptions (#DB from ICEBP),
7590 * Software exceptions (#BP and #OF),
7591 * Hardware exceptions:
7592 * Process the required exceptions and resume guest execution if possible.
7593 */
7594 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7595 Assert(uVector == X86_XCPT_DB);
7596 RT_FALL_THRU();
7597 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7598 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7599 RT_FALL_THRU();
7600 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7601 {
7602 NOREF(uVector);
7603 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
7604 | HMVMX_READ_EXIT_INSTR_LEN
7605 | HMVMX_READ_IDT_VECTORING_INFO
7606 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
7607 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7608 break;
7609 }
7610
7611 default:
7612 {
7613 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7614 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7615 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7616 break;
7617 }
7618 }
7619
7620 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7621 return rcStrict;
7622}
7623
7624
7625/**
7626 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7627 */
7628HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7629{
7630 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7631
7632 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7633 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7634 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7635
7636 /* Evaluate and deliver pending events and resume guest execution. */
7637 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7638 return VINF_SUCCESS;
7639}
7640
7641
7642/**
7643 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7644 */
7645HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7646{
7647 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7648
7649 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7650 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7651 {
7652 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7653 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7654 }
7655
7656 Assert(!CPUMIsGuestNmiBlocking(pVCpu));
7657
7658 /*
7659 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7660 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7661 */
7662 uint32_t fIntrState;
7663 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7664 AssertRC(rc);
7665 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7666 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7667 {
7668 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
7669 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
7670
7671 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7672 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7673 AssertRC(rc);
7674 }
7675
7676 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7677 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7678
7679 /* Evaluate and deliver pending events and resume guest execution. */
7680 return VINF_SUCCESS;
7681}
7682
7683
7684/**
7685 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7686 */
7687HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7688{
7689 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
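/* No host-side cache maintenance is done here; we simply skip the instruction in the guest. */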
7690 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7691}
7692
7693
7694/**
7695 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7696 */
7697HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7698{
7699 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7700 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7701}
7702
7703
7704/**
7705 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7706 */
7707HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7708{
7709 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7710
7711 /*
7712 * Get the state we need and update the exit history entry.
7713 */
7714 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7715 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7716 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7717 AssertRCReturn(rc, rc);
7718
7719 VBOXSTRICTRC rcStrict;
7720 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7721 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7722 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
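/* A non-NULL exit record means CPUID at this PC is exiting frequently enough that EM wants it run through EMHistoryExec for probing/optimization. */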
7723 if (!pExitRec)
7724 {
7725 /*
7726 * Regular CPUID instruction execution.
7727 */
7728 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7729 if (rcStrict == VINF_SUCCESS)
7730 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7731 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7732 {
7733 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7734 rcStrict = VINF_SUCCESS;
7735 }
7736 }
7737 else
7738 {
7739 /*
7740 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7741 */
7742 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7743 IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7744 AssertRCReturn(rc2, rc2);
7745
7746 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7747 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7748
7749 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7750 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7751
7752 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7753 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7754 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7755 }
7756 return rcStrict;
7757}
7758
7759
7760/**
7761 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7762 */
7763HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7764{
7765 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7766
7767 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7768 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7769 AssertRCReturn(rc, rc);
7770
7771 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7772 return VINF_EM_RAW_EMULATE_INSTR;
7773
7774 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7775 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7776}
7777
7778
7779/**
7780 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7781 */
7782HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7783{
7784 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7785
7786 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7787 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7788 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7789 AssertRCReturn(rc, rc);
7790
7791 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7792 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7793 {
7794 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7795 we must reset offsetting on VM-entry. See @bugref{6634}. */
7796 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7797 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7798 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7799 }
7800 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7801 {
7802 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7803 rcStrict = VINF_SUCCESS;
7804 }
7805 return rcStrict;
7806}
7807
7808
7809/**
7810 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7811 */
7812HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7813{
7814 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7815
7816 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7817 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7818 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX>(pVCpu, pVmcsInfo, __FUNCTION__);
7819 AssertRCReturn(rc, rc);
7820
7821 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7822 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7823 {
7824 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7825 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7826 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7827 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7828 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7829 }
7830 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7831 {
7832 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7833 rcStrict = VINF_SUCCESS;
7834 }
7835 return rcStrict;
7836}
7837
7838
7839/**
7840 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7841 */
7842HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7843{
7844 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7845
7846 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7847 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
7848 | CPUMCTX_EXTRN_CR0
7849 | CPUMCTX_EXTRN_RFLAGS
7850 | CPUMCTX_EXTRN_RIP
7851 | CPUMCTX_EXTRN_SS>(pVCpu, pVmcsInfo, __FUNCTION__);
7852 AssertRCReturn(rc, rc);
7853
7854 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7855 rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
7856 if (RT_LIKELY(rc == VINF_SUCCESS))
7857 {
7858 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
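/* RDPMC is always a 2-byte instruction (0F 33), hence the assertion below. */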
7859 Assert(pVmxTransient->cbExitInstr == 2);
7860 }
7861 else
7862 {
7863 AssertMsgFailed(("vmxHCExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
7864 rc = VERR_EM_INTERPRETER;
7865 }
7866 return rc;
7867}
7868
7869
7870/**
7871 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7872 */
7873HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7874{
7875 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7876
7877 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7878 if (EMAreHypercallInstructionsEnabled(pVCpu))
7879 {
7880 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7881 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RIP
7882 | CPUMCTX_EXTRN_RFLAGS
7883 | CPUMCTX_EXTRN_CR0
7884 | CPUMCTX_EXTRN_SS
7885 | CPUMCTX_EXTRN_CS
7886 | CPUMCTX_EXTRN_EFER>(pVCpu, pVmcsInfo, __FUNCTION__);
7887 AssertRCReturn(rc, rc);
7888
7889 /* Perform the hypercall. */
7890 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7891 if (rcStrict == VINF_SUCCESS)
7892 {
7893 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7894 AssertRCReturn(rc, rc);
7895 }
7896 else
7897 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7898 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7899 || RT_FAILURE(rcStrict));
7900
7901 /* If the hypercall changes anything other than the guest's general-purpose registers,
7902 we would need to reload the changed guest bits here before VM-entry. */
7903 }
7904 else
7905 Log4Func(("Hypercalls not enabled\n"));
7906
7907 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7908 if (RT_FAILURE(rcStrict))
7909 {
7910 vmxHCSetPendingXcptUD(pVCpu);
7911 rcStrict = VINF_SUCCESS;
7912 }
7913
7914 return rcStrict;
7915}
7916
7917
7918/**
7919 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7920 */
7921HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7922{
7923 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7924#ifndef IN_NEM_DARWIN
7925 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7926#endif
7927
7928 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7929 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
7930 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7931 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7932 AssertRCReturn(rc, rc);
7933
7934 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7935
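/* VINF_PGM_SYNC_CR3 means the INVLPG completed but a full CR3 sync is still required; RIP/RFLAGS are dirty either way and the status is propagated to the caller. */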
7936 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7937 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7938 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7939 {
7940 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7941 rcStrict = VINF_SUCCESS;
7942 }
7943 else
7944 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7945 VBOXSTRICTRC_VAL(rcStrict)));
7946 return rcStrict;
7947}
7948
7949
7950/**
7951 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7952 */
7953HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7954{
7955 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7956
7957 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7958 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7959 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS>(pVCpu, pVmcsInfo, __FUNCTION__);
7960 AssertRCReturn(rc, rc);
7961
7962 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7963 if (rcStrict == VINF_SUCCESS)
7964 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7965 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7966 {
7967 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7968 rcStrict = VINF_SUCCESS;
7969 }
7970
7971 return rcStrict;
7972}
7973
7974
7975/**
7976 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7977 */
7978HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7979{
7980 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7981
7982 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7983 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7984 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7985 AssertRCReturn(rc, rc);
7986
7987 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
7988 if (RT_SUCCESS(rcStrict))
7989 {
7990 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
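/* IEM may return VINF_EM_HALT for MWAIT; let EM decide whether a pending wake-up condition allows us to keep running instead of halting. */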
7991 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7992 rcStrict = VINF_SUCCESS;
7993 }
7994
7995 return rcStrict;
7996}
7997
7998
7999/**
8000 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
8001 * VM-exit.
8002 */
8003HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8004{
8005 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8006 return VINF_EM_RESET;
8007}
8008
8009
8010/**
8011 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
8012 */
8013HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8014{
8015 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8016
8017 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8018 AssertRCReturn(rc, rc);
8019
8020 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
8021 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
8022 rc = VINF_SUCCESS;
8023 else
8024 rc = VINF_EM_HALT;
8025
8026 if (rc != VINF_SUCCESS)
8027 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
8028 return rc;
8029}
8030
8031
8032#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8033/**
8034 * VM-exit handler for instructions that result in a \#UD exception delivered to
8035 * the guest.
8036 */
8037HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8038{
8039 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8040 vmxHCSetPendingXcptUD(pVCpu);
8041 return VINF_SUCCESS;
8042}
8043#endif
8044
8045
8046/**
8047 * VM-exit handler for expiry of the VMX-preemption timer.
8048 */
8049HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8050{
8051 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8052
8053 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
8054 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8055 Log12(("vmxHCExitPreemptTimer:\n"));
8056
8057 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
8058 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8059 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
8060 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
8061 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
8062}
8063
8064
8065/**
8066 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
8067 */
8068HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8069{
8070 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8071
8072 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8073 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8074 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
8075 AssertRCReturn(rc, rc);
8076
8077 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
8078 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
8079 : HM_CHANGED_RAISED_XCPT_MASK);
8080
8081#ifndef IN_NEM_DARWIN
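/* The guest may have changed XCR0 with this XSETBV; re-evaluate whether XCR0 needs swapping on VM-entry/exit and, if that changed, switch to the matching VM-run function. */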
8082 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8083 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
8084 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
8085 {
8086 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
8087 hmR0VmxUpdateStartVmFunction(pVCpu);
8088 }
8089#endif
8090
8091 return rcStrict;
8092}
8093
8094
8095/**
8096 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
8097 */
8098HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8099{
8100 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8101
8102 /** @todo Enable the new code after finding a reliable guest test-case. */
8103#if 1
8104 return VERR_EM_INTERPRETER;
8105#else
8106 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8107 | HMVMX_READ_EXIT_INSTR_INFO
8108 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8109 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8110 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8111 AssertRCReturn(rc, rc);
8112
8113 /* Paranoia. Ensure this has a memory operand. */
8114 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
8115
8116 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
8117 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
8118 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
8119 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
8120
8121 RTGCPTR GCPtrDesc;
8122 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
8123
8124 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
8125 GCPtrDesc, uType);
8126 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8127 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8128 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8129 {
8130 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8131 rcStrict = VINF_SUCCESS;
8132 }
8133 return rcStrict;
8134#endif
8135}
8136
8137
8138/**
8139 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
8140 * VM-exit.
8141 */
8142HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8143{
8144 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8145 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8146 AssertRCReturn(rc, rc);
8147
8148 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
8149 if (RT_FAILURE(rc))
8150 return rc;
8151
8152 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
8153 NOREF(uInvalidReason);
8154
8155#ifdef VBOX_STRICT
8156 uint32_t fIntrState;
8157 uint64_t u64Val;
8158 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_INFO
8159 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8160 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
8161
8162 Log4(("uInvalidReason %u\n", uInvalidReason));
8163 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
8164 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
8165 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
8166
8167 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
8168 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
8169 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
8170 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
8171 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
8172 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
8173 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
8174 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8175 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
8176 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
8177 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
8178 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8179# ifndef IN_NEM_DARWIN
8180 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
8181 {
8182 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
8183 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
8184 }
8185
8186 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
8187# endif
8188#endif
8189
8190 return VERR_VMX_INVALID_GUEST_STATE;
8191}
8192
8193/**
8194 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
8195 */
8196HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8197{
8198 /*
8199 * Cumulative notes of all recognized but unexpected VM-exits.
8200 *
8201 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
8202 * nested-paging is used.
8203 *
8204 * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
8205 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
8206 * this function (and thereby stopping VM execution) for handling such instructions.
8207 *
8208 *
8209 * VMX_EXIT_INIT_SIGNAL:
8210 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
8211 * They are -NOT- blocked in VMX non-root operation, so we can, in theory, still get these
8212 * VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
8213 *
8214 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery"
8215 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
8216 * See Intel spec. "23.8 Restrictions on VMX operation".
8217 *
8218 * VMX_EXIT_SIPI:
8219 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
8220 * activity state is used. We don't make use of it as our guests don't have direct
8221 * access to the host local APIC.
8222 *
8223 * See Intel spec. 25.3 "Other Causes of VM-exits".
8224 *
8225 * VMX_EXIT_IO_SMI:
8226 * VMX_EXIT_SMI:
8227 * This can only happen if we support dual-monitor treatment of SMI, which can be
8228 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
8229 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
8230 * VMX root mode or receive an SMI. If we get here, something funny is going on.
8231 *
8232 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
8233 * See Intel spec. 25.3 "Other Causes of VM-Exits"
8234 *
8235 * VMX_EXIT_ERR_MSR_LOAD:
8236 * Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
8237 * and typically indicate a bug in the hypervisor code. We thus cannot resume
8238 * execution.
8239 *
8240 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
8241 *
8242 * VMX_EXIT_ERR_MACHINE_CHECK:
8243 * Machine-check exceptions indicate a fatal/unrecoverable hardware condition,
8244 * including but not limited to system bus, ECC, parity, cache and TLB errors. An
8245 * abort-class #MC exception is raised. We thus cannot assume a
8246 * reasonable chance of continuing any sort of execution and we bail.
8247 *
8248 * See Intel spec. 15.1 "Machine-check Architecture".
8249 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
8250 *
8251 * VMX_EXIT_PML_FULL:
8252 * VMX_EXIT_VIRTUALIZED_EOI:
8253 * VMX_EXIT_APIC_WRITE:
8254 * We do not currently support any of these features and thus they are all unexpected
8255 * VM-exits.
8256 *
8257 * VMX_EXIT_GDTR_IDTR_ACCESS:
8258 * VMX_EXIT_LDTR_TR_ACCESS:
8259 * VMX_EXIT_RDRAND:
8260 * VMX_EXIT_RSM:
8261 * VMX_EXIT_VMFUNC:
8262 * VMX_EXIT_ENCLS:
8263 * VMX_EXIT_RDSEED:
8264 * VMX_EXIT_XSAVES:
8265 * VMX_EXIT_XRSTORS:
8266 * VMX_EXIT_UMWAIT:
8267 * VMX_EXIT_TPAUSE:
8268 * VMX_EXIT_LOADIWKEY:
8269 * These VM-exits are -not- caused unconditionally by execution of the corresponding
8270 * instruction. Any VM-exit for these instructions indicates a hardware problem,
8271 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
8272 *
8273 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
8274 */
8275 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8276 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
8277 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8278}
8279
8280
8281/**
8282 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
8283 */
8284HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8285{
8286 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8287
8288 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8289
8290 /** @todo Optimize this: We currently drag in the whole MSR state
8291 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only fetch the
8292 * MSRs required. That would require changes to IEM and possibly CPUM too.
8293 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8294 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8295 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8296 int rc;
8297 switch (idMsr)
8298 {
8299 default:
8300 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8301 __FUNCTION__);
8302 AssertRCReturn(rc, rc);
8303 break;
8304 case MSR_K8_FS_BASE:
8305 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8306 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8307 AssertRCReturn(rc, rc);
8308 break;
8309 case MSR_K8_GS_BASE:
8310 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8311 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8312 AssertRCReturn(rc, rc);
8313 break;
8314 }
8315
8316 Log4Func(("ecx=%#RX32\n", idMsr));
8317
8318#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8319 Assert(!pVmxTransient->fIsNestedGuest);
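/* Sanity: with MSR bitmaps enabled we should not see RDMSR exits for MSRs in the auto-load/store area (EFER excepted) or for lazily restored MSRs with read pass-through. */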
8320 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
8321 {
8322 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
8323 && idMsr != MSR_K6_EFER)
8324 {
8325 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
8326 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8327 }
8328 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8329 {
8330 Assert(pVmcsInfo->pvMsrBitmap);
8331 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8332 if (fMsrpm & VMXMSRPM_ALLOW_RD)
8333 {
8334 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
8335 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8336 }
8337 }
8338 }
8339#endif
8340
8341 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
8342 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
8343 if (rcStrict == VINF_SUCCESS)
8344 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8345 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8346 {
8347 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8348 rcStrict = VINF_SUCCESS;
8349 }
8350 else
8351 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
8352 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8353
8354 return rcStrict;
8355}
8356
8357
8358/**
8359 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
8360 */
8361HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8362{
8363 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8364
8365 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8366
8367 /*
8368 * The FS and GS base MSRs are not part of the all-MSRs mask used below.
8369 * Although we don't need to fetch the base itself (it will be overwritten shortly),
8370 * loading the guest state would also load the entire segment register, including the
8371 * limit and attributes, and thus we need to import them here.
8372 */
8373 /** @todo Optimize this: We currently drag in the whole MSR state
8374 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only fetch the
8375 * MSRs required. That would require changes to IEM and possibly CPUM too.
8376 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8377 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8378 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8379 int rc;
8380 switch (idMsr)
8381 {
8382 default:
8383 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8384 __FUNCTION__);
8385 AssertRCReturn(rc, rc);
8386 break;
8387
8388 case MSR_K8_FS_BASE:
8389 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8390 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8391 AssertRCReturn(rc, rc);
8392 break;
8393 case MSR_K8_GS_BASE:
8394 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8395 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8396 AssertRCReturn(rc, rc);
8397 break;
8398 }
8399 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
8400
8401 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
8402 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
8403
8404 if (rcStrict == VINF_SUCCESS)
8405 {
8406 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8407
8408 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
8409 if ( idMsr == MSR_IA32_APICBASE
8410 || ( idMsr >= MSR_IA32_X2APIC_START
8411 && idMsr <= MSR_IA32_X2APIC_END))
8412 {
8413 /*
8414 * We've already saved the APIC related guest-state (TPR) in post-run phase.
8415 * When full APIC register virtualization is implemented we'll have to make
8416 * sure APIC state is saved from the VMCS before IEM changes it.
8417 */
8418 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
8419 }
8420 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
8421 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8422 else if (idMsr == MSR_K6_EFER)
8423 {
8424 /*
8425 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
8426 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
8427 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
8428 */
8429 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
8430 }
8431
8432 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
8433 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
8434 {
8435 switch (idMsr)
8436 {
8437 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
8438 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
8439 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
8440 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
8441 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
8442 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
8443 default:
8444 {
8445#ifndef IN_NEM_DARWIN
8446 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8447 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
8448 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8449 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
8450#else
8451 AssertMsgFailed(("TODO\n"));
8452#endif
8453 break;
8454 }
8455 }
8456 }
8457#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8458 else
8459 {
8460 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
8461 switch (idMsr)
8462 {
8463 case MSR_IA32_SYSENTER_CS:
8464 case MSR_IA32_SYSENTER_EIP:
8465 case MSR_IA32_SYSENTER_ESP:
8466 case MSR_K8_FS_BASE:
8467 case MSR_K8_GS_BASE:
8468 {
8469 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
8470 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8471 }
8472
8473 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
8474 default:
8475 {
8476 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8477 {
8478 /* EFER MSR writes are always intercepted. */
8479 if (idMsr != MSR_K6_EFER)
8480 {
8481 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
8482 idMsr));
8483 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8484 }
8485 }
8486
8487 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8488 {
8489 Assert(pVmcsInfo->pvMsrBitmap);
8490 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8491 if (fMsrpm & VMXMSRPM_ALLOW_WR)
8492 {
8493 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
8494 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8495 }
8496 }
8497 break;
8498 }
8499 }
8500 }
8501#endif /* VBOX_STRICT */
8502 }
8503 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8504 {
8505 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8506 rcStrict = VINF_SUCCESS;
8507 }
8508 else
8509 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
8510 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8511
8512 return rcStrict;
8513}
8514
8515
8516/**
8517 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8518 */
8519HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8520{
8521 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8522
8523    /** @todo The guest has likely hit a contended spinlock. We might want to
8524     *        poke or schedule a different guest VCPU. */
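    /* Note: returning VINF_EM_RAW_INTERRUPT below drops back out of the inner run loop; presumably
       this gives other EMTs (e.g. the VCPU holding the contended lock) a chance to make progress
       before we re-enter the guest. */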
8525 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8526 if (RT_SUCCESS(rc))
8527 return VINF_EM_RAW_INTERRUPT;
8528
8529 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8530 return rc;
8531}
8532
8533
8534/**
8535 * VM-exit handler for when the TPR value is lowered below the specified
8536 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8537 */
8538HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8539{
8540 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8541 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8542
8543 /*
8544 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8545 * We'll re-evaluate pending interrupts and inject them before the next VM
8546 * entry so we can just continue execution here.
8547 */
8548 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8549 return VINF_SUCCESS;
8550}
8551
8552
8553/**
8554 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8555 * VM-exit.
8556 *
8557 * @retval VINF_SUCCESS when guest execution can continue.
8558 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8559 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8560 * incompatible guest state for VMX execution (real-on-v86 case).
8561 */
8562HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8563{
8564 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8565 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8566
8567 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8568 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8569 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8570
8571 VBOXSTRICTRC rcStrict;
8572 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8573 uint64_t const uExitQual = pVmxTransient->uExitQual;
8574 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
8575 switch (uAccessType)
8576 {
8577 /*
8578 * MOV to CRx.
8579 */
8580 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8581 {
8582 /*
8583 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8584 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
8585 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8586 * PAE PDPTEs as well.
8587 */
8588 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8589 AssertRCReturn(rc, rc);
8590
8591 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8592#ifndef IN_NEM_DARWIN
8593 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8594#endif
8595 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8596 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8597
8598 /*
8599             * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8600 * - When nested paging isn't used.
8601 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8602 * - We are executing in the VM debug loop.
8603 */
8604#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8605# ifndef IN_NEM_DARWIN
8606 Assert( iCrReg != 3
8607 || !VM_IS_VMX_NESTED_PAGING(pVM)
8608 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8609 || pVCpu->hmr0.s.fUsingDebugLoop);
8610# else
8611 Assert( iCrReg != 3
8612 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8613# endif
8614#endif
8615
8616 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8617 Assert( iCrReg != 8
8618 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8619
8620 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8621 AssertMsg( rcStrict == VINF_SUCCESS
8622 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8623
8624#ifndef IN_NEM_DARWIN
8625 /*
8626 * This is a kludge for handling switches back to real mode when we try to use
8627 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8628 * deal with special selector values, so we have to return to ring-3 and run
8629 * there till the selector values are V86 mode compatible.
8630 *
8631 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8632 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8633 * this function.
8634 */
8635 if ( iCrReg == 0
8636 && rcStrict == VINF_SUCCESS
8637 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8638 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8639 && (uOldCr0 & X86_CR0_PE)
8640 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8641 {
8642 /** @todo Check selectors rather than returning all the time. */
8643 Assert(!pVmxTransient->fIsNestedGuest);
8644 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8645 rcStrict = VINF_EM_RESCHEDULE_REM;
8646 }
8647#endif
8648
8649 break;
8650 }
8651
8652 /*
8653 * MOV from CRx.
8654 */
8655 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8656 {
8657 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8658 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8659
8660 /*
8661             * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8662 * - When nested paging isn't used.
8663 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8664 * - We are executing in the VM debug loop.
8665 */
8666#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8667# ifndef IN_NEM_DARWIN
8668 Assert( iCrReg != 3
8669 || !VM_IS_VMX_NESTED_PAGING(pVM)
8670 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8671 || pVCpu->hmr0.s.fLeaveDone);
8672# else
8673 Assert( iCrReg != 3
8674 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8675# endif
8676#endif
8677
8678 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8679 Assert( iCrReg != 8
8680 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8681
8682 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8683 break;
8684 }
8685
8686 /*
8687 * CLTS (Clear Task-Switch Flag in CR0).
8688 */
8689 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8690 {
8691 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8692 break;
8693 }
8694
8695 /*
8696 * LMSW (Load Machine-Status Word into CR0).
8697 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8698 */
8699 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8700 {
8701 RTGCPTR GCPtrEffDst;
8702 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8703 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8704 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8705 if (fMemOperand)
8706 {
8707 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
8708 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8709 }
8710 else
8711 GCPtrEffDst = NIL_RTGCPTR;
8712 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8713 break;
8714 }
8715
8716 default:
8717 {
8718 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8719 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8720 }
8721 }
8722
8723 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8724 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8725 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8726
8727 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8728 NOREF(pVM);
8729 return rcStrict;
8730}
8731
8732
8733/**
8734 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8735 * VM-exit.
8736 */
8737HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8738{
8739 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8740 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8741
8742 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8743 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8744 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8745 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8746#define VMX_HC_EXIT_IO_INSTR_INITIAL_REGS (IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER)
8747    /* The EFER MSR is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8748 int rc = vmxHCImportGuestState<VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8749 AssertRCReturn(rc, rc);
8750
8751    /* See Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
8752 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8753 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8754 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8755 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8756 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8757 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8758 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
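    /* The 'size of access' field in the exit qualification encodes 0 for byte, 1 for word and 3 for
       dword accesses; the value 2 is not defined, which is why it is excluded above and why slot 2
       of the I/O size/mask lookup tables further down is zero. */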
8759
8760 /*
8761 * Update exit history to see if this exit can be optimized.
8762 */
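    /* If an exit record is returned, this RIP appears to be producing frequent, similar I/O exits
       (or needs probing); in that case the detailed handling below is skipped and EMHistoryExec is
       used instead to execute/probe a whole burst of instructions in one go. */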
8763 VBOXSTRICTRC rcStrict;
8764 PCEMEXITREC pExitRec = NULL;
8765 if ( !fGstStepping
8766 && !fDbgStepping)
8767 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8768 !fIOString
8769 ? !fIOWrite
8770 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8771 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8772 : !fIOWrite
8773 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8774 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8775 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8776 if (!pExitRec)
8777 {
8778 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8779 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
8780
8781 uint32_t const cbValue = s_aIOSizes[uIOSize];
8782 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8783 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8784 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8785 if (fIOString)
8786 {
8787 /*
8788 * INS/OUTS - I/O String instruction.
8789 *
8790 * Use instruction-information if available, otherwise fall back on
8791 * interpreting the instruction.
8792 */
8793 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8794 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8795 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8796 if (fInsOutsInfo)
8797 {
8798 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
8799 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8800 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8801 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8802 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8803 if (fIOWrite)
8804 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8805 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8806 else
8807 {
8808 /*
8809 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8810 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8811 * See Intel Instruction spec. for "INS".
8812 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8813 */
8814 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8815 }
8816 }
8817 else
8818 rcStrict = IEMExecOne(pVCpu);
8819
8820 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8821 fUpdateRipAlready = true;
8822 }
8823 else
8824 {
8825 /*
8826 * IN/OUT - I/O instruction.
8827 */
8828 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8829 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8830 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8831 if (fIOWrite)
8832 {
8833 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8834 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8835#ifndef IN_NEM_DARWIN
8836 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8837 && !pCtx->eflags.Bits.u1TF)
8838 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8839#endif
8840 }
8841 else
8842 {
8843 uint32_t u32Result = 0;
8844 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8845 if (IOM_SUCCESS(rcStrict))
8846 {
8847 /* Save result of I/O IN instr. in AL/AX/EAX. */
8848 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8849 }
8850#ifndef IN_NEM_DARWIN
8851 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8852 && !pCtx->eflags.Bits.u1TF)
8853 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8854#endif
8855 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8856 }
8857 }
8858
8859 if (IOM_SUCCESS(rcStrict))
8860 {
8861 if (!fUpdateRipAlready)
8862 {
8863 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8864 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8865 }
8866
8867 /*
8868             * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault
8869             * guru meditation while booting a Fedora 17 64-bit guest.
8870 *
8871 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8872 */
8873 if (fIOString)
8874 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8875
8876 /*
8877 * If any I/O breakpoints are armed, we need to check if one triggered
8878 * and take appropriate action.
8879 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8880 */
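            /* X86_DR7_ANY_RW_IO checks whether any enabled DR7 breakpoint uses the I/O read/write
               encoding (R/W = 10b), an encoding that is only defined when CR4.DE is set - hence the
               CR4.DE check in the condition below. */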
8881#if 1
8882 AssertCompile(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7);
8883#else
8884 AssertCompile(!(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7));
8885 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo);
8886 AssertRCReturn(rc, rc);
8887#endif
8888
8889 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8890 * execution engines about whether hyper BPs and such are pending. */
8891 uint32_t const uDr7 = pCtx->dr[7];
8892 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8893 && X86_DR7_ANY_RW_IO(uDr7)
8894 && (pCtx->cr4 & X86_CR4_DE))
8895 || DBGFBpIsHwIoArmed(pVM)))
8896 {
8897 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8898
8899#ifndef IN_NEM_DARWIN
8900 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8901 VMMRZCallRing3Disable(pVCpu);
8902 HM_DISABLE_PREEMPT(pVCpu);
8903
8904 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8905
8906 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8907 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8908 {
8909 /* Raise #DB. */
8910 if (fIsGuestDbgActive)
8911 ASMSetDR6(pCtx->dr[6]);
8912 if (pCtx->dr[7] != uDr7)
8913 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8914
8915 vmxHCSetPendingXcptDB(pVCpu);
8916 }
8917 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8918 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8919 else if ( rcStrict2 != VINF_SUCCESS
8920 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8921 rcStrict = rcStrict2;
8922 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8923
8924 HM_RESTORE_PREEMPT();
8925 VMMRZCallRing3Enable(pVCpu);
8926#else
8927 /** @todo */
8928#endif
8929 }
8930 }
8931
8932#ifdef VBOX_STRICT
8933 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8934 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8935 Assert(!fIOWrite);
8936 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8937 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8938 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8939 Assert(fIOWrite);
8940 else
8941 {
8942# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8943          * statuses that the VMM device and some others may return. See
8944 * IOM_SUCCESS() for guidance. */
8945 AssertMsg( RT_FAILURE(rcStrict)
8946 || rcStrict == VINF_SUCCESS
8947 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8948 || rcStrict == VINF_EM_DBG_BREAKPOINT
8949 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8950 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8951# endif
8952 }
8953#endif
8954 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8955 }
8956 else
8957 {
8958 /*
8959 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8960 */
8961 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
8962 VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8963 AssertRCReturn(rc2, rc2);
8964 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8965 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8966 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8967 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8968 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8969 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8970
8971 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8972 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8973
8974 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8975 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8976 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8977 }
8978 return rcStrict;
8979}
8980
8981
8982/**
8983 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8984 * VM-exit.
8985 */
8986HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8987{
8988 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8989
8990    /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8991 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
8992 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
8993 {
8994 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_INFO>(pVCpu, pVmxTransient);
8995 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
8996 {
8997 uint32_t uErrCode;
8998 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
8999 {
9000 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9001 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
9002 }
9003 else
9004 uErrCode = 0;
9005
9006 RTGCUINTPTR GCPtrFaultAddress;
9007 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
9008 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
9009 else
9010 GCPtrFaultAddress = 0;
9011
9012 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9013
9014 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
9015 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
9016
9017 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
9018 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
9019 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9020 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9021 }
9022 }
9023
9024 /* Fall back to the interpreter to emulate the task-switch. */
9025 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9026 return VERR_EM_INTERPRETER;
9027}
9028
9029
9030/**
9031 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
9032 */
9033HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9034{
9035 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9036
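    /* The monitor trap flag causes a VM-exit after a single guest instruction has executed and is
       used here for debugger single-stepping; clear it again and report the completed step to the
       debugger via VINF_EM_DBG_STEPPED. */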
9037 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9038 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
9039 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9040 AssertRC(rc);
9041 return VINF_EM_DBG_STEPPED;
9042}
9043
9044
9045/**
9046 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
9047 */
9048HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9049{
9050 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9051 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
9052
9053 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9054 | HMVMX_READ_EXIT_INSTR_LEN
9055 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9056 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9057 | HMVMX_READ_IDT_VECTORING_INFO
9058 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9059
9060 /*
9061 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9062 */
9063 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9064 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9065 {
9066        /* If event delivery causes an APIC-access VM-exit (as some crazy guests manage to do), fall back to instruction emulation. */
9067 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
9068 {
9069 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9070 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9071 }
9072 }
9073 else
9074 {
9075 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9076 return rcStrict;
9077 }
9078
9079 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
9080 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9081 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9082 AssertRCReturn(rc, rc);
9083
9084    /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
9085 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
9086 switch (uAccessType)
9087 {
9088#ifndef IN_NEM_DARWIN
9089 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
9090 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
9091 {
9092 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
9093 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
9094 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
9095
9096 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
9097 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
9098 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
9099 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
9100 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
9101
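            /* Treat the access like an ordinary MMIO access to the faulting guest-physical address:
               the page-aligned APIC base plus the offset taken from the exit qualification. */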
9102 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
9103 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
9104 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9105 if ( rcStrict == VINF_SUCCESS
9106 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9107 || rcStrict == VERR_PAGE_NOT_PRESENT)
9108 {
9109 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9110 | HM_CHANGED_GUEST_APIC_TPR);
9111 rcStrict = VINF_SUCCESS;
9112 }
9113 break;
9114 }
9115#else
9116 /** @todo */
9117#endif
9118
9119 default:
9120 {
9121 Log4Func(("uAccessType=%#x\n", uAccessType));
9122 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
9123 break;
9124 }
9125 }
9126
9127 if (rcStrict != VINF_SUCCESS)
9128 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
9129 return rcStrict;
9130}
9131
9132
9133/**
9134 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
9135 * VM-exit.
9136 */
9137HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9138{
9139 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9140 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9141
9142 /*
9143 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
9144 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
9145 * must emulate the MOV DRx access.
9146 */
9147 if (!pVmxTransient->fIsNestedGuest)
9148 {
9149 /* We should -not- get this VM-exit if the guest's debug registers were active. */
9150 if (pVmxTransient->fWasGuestDebugStateActive)
9151 {
9152 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
9153 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
9154 }
9155
9156 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
9157 && !pVmxTransient->fWasHyperDebugStateActive)
9158 {
9159 Assert(!DBGFIsStepping(pVCpu));
9160 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
9161
9162 /* Don't intercept MOV DRx any more. */
9163 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
9164 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9165 AssertRC(rc);
9166
9167#ifndef IN_NEM_DARWIN
9168 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
9169 VMMRZCallRing3Disable(pVCpu);
9170 HM_DISABLE_PREEMPT(pVCpu);
9171
9172 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
9173 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
9174 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9175
9176 HM_RESTORE_PREEMPT();
9177 VMMRZCallRing3Enable(pVCpu);
9178#else
9179 CPUMR3NemActivateGuestDebugState(pVCpu);
9180 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9181 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
9182#endif
9183
9184#ifdef VBOX_WITH_STATISTICS
9185 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9186 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9187 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9188 else
9189 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9190#endif
9191 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
9192 return VINF_SUCCESS;
9193 }
9194 }
9195
9196 /*
9197     * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires the EFER MSR and CS.
9198 * The EFER MSR is always up-to-date.
9199 * Update the segment registers and DR7 from the CPU.
9200 */
9201 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9202 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9203 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo, __FUNCTION__);
9204 AssertRCReturn(rc, rc);
9205 Log4Func(("cs:rip=%#04x:%08RX64\n", pCtx->cs.Sel, pCtx->rip));
9206
9207 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9208 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9209 {
9210 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
9211 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
9212 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
9213 if (RT_SUCCESS(rc))
9214 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
9215 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9216 }
9217 else
9218 {
9219 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
9220 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
9221 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
9222 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9223 }
9224
9225 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
9226 if (RT_SUCCESS(rc))
9227 {
9228 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
9229 AssertRCReturn(rc2, rc2);
9230 return VINF_SUCCESS;
9231 }
9232 return rc;
9233}
9234
9235
9236/**
9237 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
9238 * Conditional VM-exit.
9239 */
9240HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9241{
9242 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9243
9244#ifndef IN_NEM_DARWIN
9245 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9246
9247 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_LEN
9248 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9249 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9250 | HMVMX_READ_IDT_VECTORING_INFO
9251 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9252 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9253
9254 /*
9255 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9256 */
9257 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9258 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9259 {
9260 /*
9261 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
9262 * instruction emulation to inject the original event. Otherwise, injecting the original event
9263 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
9264 */
9265 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9266 { /* likely */ }
9267 else
9268 {
9269 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9270# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9271 /** @todo NSTVMX: Think about how this should be handled. */
9272 if (pVmxTransient->fIsNestedGuest)
9273 return VERR_VMX_IPE_3;
9274# endif
9275 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9276 }
9277 }
9278 else
9279 {
9280 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9281 return rcStrict;
9282 }
9283
9284 /*
9285 * Get sufficient state and update the exit history entry.
9286 */
9287 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9288 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9289 AssertRCReturn(rc, rc);
9290
9291 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9292 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
9293 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
9294 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
9295 if (!pExitRec)
9296 {
9297 /*
9298 * If we succeed, resume guest execution.
9299 * If we fail in interpreting the instruction because we couldn't get the guest physical address
9300 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
9301 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
9302 * weird case. See @bugref{6043}.
9303 */
9304 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9305 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9306/** @todo bird: We can probably just go straight to IOM here and assume that
9307 * it's MMIO, then fall back on PGM if that hunch didn't work out so
9308 * well. However, we need to address that aliasing workarounds that
9309 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
9310 *
9311 * Might also be interesting to see if we can get this done more or
9312 * less locklessly inside IOM. Need to consider the lookup table
9313 * updating and use a bit more carefully first (or do all updates via
9314 * rendezvous) */
9315 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
9316 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
9317 if ( rcStrict == VINF_SUCCESS
9318 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9319 || rcStrict == VERR_PAGE_NOT_PRESENT)
9320 {
9321 /* Successfully handled MMIO operation. */
9322 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9323 | HM_CHANGED_GUEST_APIC_TPR);
9324 rcStrict = VINF_SUCCESS;
9325 }
9326 }
9327 else
9328 {
9329 /*
9330 * Frequent exit or something needing probing. Call EMHistoryExec.
9331 */
9332 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
9333 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
9334
9335 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9336 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9337
9338 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9339 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9340 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9341 }
9342 return rcStrict;
9343#else
9344 AssertFailed();
9345 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
9346#endif
9347}
9348
9349
9350/**
9351 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
9352 * VM-exit.
9353 */
9354HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9355{
9356 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9357#ifndef IN_NEM_DARWIN
9358 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9359
9360 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9361 | HMVMX_READ_EXIT_INSTR_LEN
9362 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9363 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9364 | HMVMX_READ_IDT_VECTORING_INFO
9365 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9366 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9367
9368 /*
9369 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9370 */
9371 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9372 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9373 {
9374 /*
9375 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
9376 * we shall resolve the nested #PF and re-inject the original event.
9377 */
9378 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9379 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
9380 }
9381 else
9382 {
9383 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9384 return rcStrict;
9385 }
9386
9387 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9388 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9389 AssertRCReturn(rc, rc);
9390
9391 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9392 uint64_t const uExitQual = pVmxTransient->uExitQual;
9393 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
9394
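    /* Convert the exit-qualification access bits into an x86 #PF-style error code for PGM: an
       instruction fetch maps to ID, a write access to RW, and any of the EPT entry R/W/X permission
       bits being set means the translation existed, i.e. a permission violation, hence P. */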
9395 RTGCUINT uErrorCode = 0;
9396 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
9397 uErrorCode |= X86_TRAP_PF_ID;
9398 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9399 uErrorCode |= X86_TRAP_PF_RW;
9400 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
9401 uErrorCode |= X86_TRAP_PF_P;
9402
9403 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9404 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
9405
9406 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9407
9408 /*
9409 * Handle the pagefault trap for the nested shadow table.
9410 */
9411 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
9412 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
9413 TRPMResetTrap(pVCpu);
9414
9415 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
9416 if ( rcStrict == VINF_SUCCESS
9417 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9418 || rcStrict == VERR_PAGE_NOT_PRESENT)
9419 {
9420 /* Successfully synced our nested page tables. */
9421 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
9422 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
9423 return VINF_SUCCESS;
9424 }
9425 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9426 return rcStrict;
9427
9428#else /* IN_NEM_DARWIN */
9429 PVM pVM = pVCpu->CTX_SUFF(pVM);
9430 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
9431 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9432 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9433 vmxHCImportGuestRip(pVCpu);
9434 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
9435
9436 /*
9437 * Ask PGM for information about the given GCPhys. We need to check if we're
9438 * out of sync first.
9439 */
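    /* The checker callback may fix up the page state so that the access can simply be restarted
       (State.fCanResume); otherwise, or if PGM doesn't know the page, we fall back to emulating the
       access below. */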
9440 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE), false, false };
9441 PGMPHYSNEMPAGEINFO Info;
9442 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
9443 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
9444 if (RT_SUCCESS(rc))
9445 {
9446 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9447 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
9448 {
9449 if (State.fCanResume)
9450 {
9451 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
9452 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9453 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9454 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9455 State.fDidSomething ? "" : " no-change"));
9456 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
9457 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9458 return VINF_SUCCESS;
9459 }
9460 }
9461
9462 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
9463 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9464 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9465 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9466 State.fDidSomething ? "" : " no-change"));
9467 }
9468 else
9469 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
9470 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9471 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
9472
9473 /*
9474 * Emulate the memory access, either access handler or special memory.
9475 */
9476 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
9477 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9478 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
9479 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
9480 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9481
9482 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9483 AssertRCReturn(rc, rc);
9484
9485 VBOXSTRICTRC rcStrict;
9486 if (!pExitRec)
9487 rcStrict = IEMExecOne(pVCpu);
9488 else
9489 {
9490 /* Frequent access or probing. */
9491 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9492 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9493 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9494 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9495 }
9496
9497 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9498
9499 Log4Func(("EPT return rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9500 return rcStrict;
9501#endif /* IN_NEM_DARWIN */
9502}
9503
9504#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9505
9506/**
9507 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
9508 */
9509HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9510{
9511 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9512
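    /* The VMX-instruction exit handlers below all follow the same pattern: read the exit
       qualification, instruction information and length, import just enough guest state for IEM,
       decode the memory operand and hand it to IEM's dedicated worker (IEMExecDecodedVmclear here),
       then update the HM_CHANGED flags for whatever the emulation may have modified. */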
9513 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9514 | HMVMX_READ_EXIT_INSTR_INFO
9515 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9516 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9517 | CPUMCTX_EXTRN_SREG_MASK
9518 | CPUMCTX_EXTRN_HWVIRT
9519 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9520 AssertRCReturn(rc, rc);
9521
9522 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9523
9524 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9525 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9526
9527 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
9528 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9529 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9530 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9531 {
9532 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9533 rcStrict = VINF_SUCCESS;
9534 }
9535 return rcStrict;
9536}
9537
9538
9539/**
9540 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9541 */
9542HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9543{
9544 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9545
9546 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
9547 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9548 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9549 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9550 AssertRCReturn(rc, rc);
9551
9552 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9553
9554 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9555 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9556 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9557 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9558 {
9559 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9560 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9561 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9562 }
9563 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9564 return rcStrict;
9565}
9566
9567
9568/**
9569 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9570 */
9571HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9572{
9573 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9574
9575 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9576 | HMVMX_READ_EXIT_INSTR_INFO
9577 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9578 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9579 | CPUMCTX_EXTRN_SREG_MASK
9580 | CPUMCTX_EXTRN_HWVIRT
9581 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9582 AssertRCReturn(rc, rc);
9583
9584 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9585
9586 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9587 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9588
9589 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9590 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9591 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9592 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9593 {
9594 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9595 rcStrict = VINF_SUCCESS;
9596 }
9597 return rcStrict;
9598}
9599
9600
9601/**
9602 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9603 */
9604HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9605{
9606 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9607
9608 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9609 | HMVMX_READ_EXIT_INSTR_INFO
9610 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9611 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9612 | CPUMCTX_EXTRN_SREG_MASK
9613 | CPUMCTX_EXTRN_HWVIRT
9614 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9615 AssertRCReturn(rc, rc);
9616
9617 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9618
9619 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9620 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9621
9622 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9623 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9624 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9625 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9626 {
9627 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9628 rcStrict = VINF_SUCCESS;
9629 }
9630 return rcStrict;
9631}
9632
9633
9634/**
9635 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9636 */
9637HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9638{
9639 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9640
9641 /*
9642 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9643     * thus might not need to import the shadow VMCS state, but it's safer to do so just in
9644     * case code elsewhere dares to look at unsynced VMCS fields.
9645 */
9646 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9647 | HMVMX_READ_EXIT_INSTR_INFO
9648 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9649 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9650 | CPUMCTX_EXTRN_SREG_MASK
9651 | CPUMCTX_EXTRN_HWVIRT
9652 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9653 AssertRCReturn(rc, rc);
9654
9655 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9656
9657 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9658 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9659 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9660
9661 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9662 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9663 {
9664 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9665
9666# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
9667 /* Try for exit optimization. This is on the following instruction
9668 because it would be a waste of time to have to reinterpret the
9669            already decoded vmread instruction. */
9670 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
9671 if (pExitRec)
9672 {
9673 /* Frequent access or probing. */
9674 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9675 AssertRCReturn(rc, rc);
9676
9677 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9678 Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9679 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9680 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9681 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9682 }
9683# endif
9684 }
9685 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9686 {
9687 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9688 rcStrict = VINF_SUCCESS;
9689 }
9690 return rcStrict;
9691}
9692
9693
9694/**
9695 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9696 */
9697HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9698{
9699 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9700
9701 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
9702 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9703 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9704 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9705 AssertRCReturn(rc, rc);
9706
9707 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9708
9709 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9710 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9711 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9712 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9713 {
9714 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9715 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9716 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9717 }
9718 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9719 return rcStrict;
9720}
9721
9722
9723/**
9724 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9725 */
9726HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9727{
9728 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9729
9730 /*
9731 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, since our HM hook
9732 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and it
9733 * flags re-loading the entire shadow VMCS, we should save the entire shadow VMCS here.
9734 */
9735 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9736 | HMVMX_READ_EXIT_INSTR_INFO
9737 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9738 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9739 | CPUMCTX_EXTRN_SREG_MASK
9740 | CPUMCTX_EXTRN_HWVIRT
9741 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9742 AssertRCReturn(rc, rc);
9743
9744 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9745
9746 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9747 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9748 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9749
9750 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9751 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9752 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9753 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9754 {
9755 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9756 rcStrict = VINF_SUCCESS;
9757 }
9758 return rcStrict;
9759}
9760
9761
9762/**
9763 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9764 */
9765HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9766{
9767 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9768
9769 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9770 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
9771 | CPUMCTX_EXTRN_HWVIRT
9772 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9773 AssertRCReturn(rc, rc);
9774
9775 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9776
9777 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9778 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9779 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9780 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9781 {
9782 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9783 rcStrict = VINF_SUCCESS;
9784 }
9785 return rcStrict;
9786}
9787
9788
9789/**
9790 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9791 */
9792HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9793{
9794 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9795
9796 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9797 | HMVMX_READ_EXIT_INSTR_INFO
9798 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9799 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9800 | CPUMCTX_EXTRN_SREG_MASK
9801 | CPUMCTX_EXTRN_HWVIRT
9802 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9803 AssertRCReturn(rc, rc);
9804
9805 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9806
9807 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9808 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9809
9810 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9811 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9812 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9813 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9814 {
9815 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9816 rcStrict = VINF_SUCCESS;
9817 }
9818 return rcStrict;
9819}
9820
9821
9822/**
9823 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9824 */
9825HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9826{
9827 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9828
9829 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9830 | HMVMX_READ_EXIT_INSTR_INFO
9831 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9832 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9833 | CPUMCTX_EXTRN_SREG_MASK
9834 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9835 AssertRCReturn(rc, rc);
9836
9837 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9838
9839 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9840 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9841
9842 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9843 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9844 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9845 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9846 {
9847 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9848 rcStrict = VINF_SUCCESS;
9849 }
9850 return rcStrict;
9851}
9852
9853
9854# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9855/**
9856 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9857 */
9858HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9859{
9860 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9861
9862 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9863 | HMVMX_READ_EXIT_INSTR_INFO
9864 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9865 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9866 | CPUMCTX_EXTRN_SREG_MASK
9867 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9868 AssertRCReturn(rc, rc);
9869
9870 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9871
9872 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9873 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9874
9875 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9876 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9877 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9878 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9879 {
9880 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9881 rcStrict = VINF_SUCCESS;
9882 }
9883 return rcStrict;
9884}
9885# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9886#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9887/** @} */
9888
9889
9890#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9891/** @name Nested-guest VM-exit handlers.
9892 * @{
9893 */
9894/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9895/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9896/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9897
9898/**
9899 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9900 * Conditional VM-exit.
9901 */
9902HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9903{
9904 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9905
9906 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
9907
9908 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9909 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9910 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9911
9912 switch (uExitIntType)
9913 {
9914# ifndef IN_NEM_DARWIN
9915 /*
9916 * Physical NMIs:
9917          * We must not direct host physical NMIs to the nested-guest; dispatch them to the host.
9918 */
9919 case VMX_EXIT_INT_INFO_TYPE_NMI:
9920 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9921# endif
9922
9923 /*
9924 * Hardware exceptions,
9925 * Software exceptions,
9926 * Privileged software exceptions:
9927 * Figure out if the exception must be delivered to the guest or the nested-guest.
9928 */
9929 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9930 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9931 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9932 {
9933 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9934 | HMVMX_READ_EXIT_INSTR_LEN
9935 | HMVMX_READ_IDT_VECTORING_INFO
9936 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9937
9938 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9939 if (CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo), pVmxTransient->uExitIntErrorCode))
9940 {
9941 /* Exit qualification is required for debug and page-fault exceptions. */
9942 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9943
9944 /*
9945 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9946 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9947 * length. However, if delivery of a software interrupt, software exception or privileged
9948 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
9949 */
9950 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9951 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT(pVmxTransient->uExitIntInfo,
9952 pVmxTransient->uExitIntErrorCode,
9953 pVmxTransient->uIdtVectoringInfo,
9954 pVmxTransient->uIdtVectoringErrorCode);
9955#ifdef DEBUG_ramshankar
9956 vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9957 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n",
9958 pVmxTransient->uExitIntInfo, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
9959 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9960 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n",
9961 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
9962#endif
9963 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
9964 }
9965
9966 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
9967 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9968 return vmxHCExitXcpt(pVCpu, pVmxTransient);
9969 }
9970
9971 /*
9972 * Software interrupts:
9973 * VM-exits cannot be caused by software interrupts.
9974 *
9975 * External interrupts:
9976 * This should only happen when "acknowledge external interrupts on VM-exit"
9977 * control is set. However, we never set this when executing a guest or
9978 * nested-guest. For nested-guests it is emulated while injecting interrupts into
9979 * the guest.
9980 */
9981 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
9982 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
9983 default:
9984 {
9985 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
9986 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
9987 }
9988 }
9989}
9990
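/*
 * A minimal standalone sketch of the architectural check applied above by
 * CPUMIsGuestVmxXcptInterceptSet (MyXcptIntercepted, uXcptBitmap, uPfecMask and
 * uPfecMatch are illustrative names, not the CPUM internals): an exception with
 * vector N (0..31) is intercepted by the nested-hypervisor when bit N is set in
 * its exception bitmap, with #PF additionally refined by the page-fault
 * error-code mask/match fields.
 *
 * @code
 *  #include <stdbool.h>
 *  #include <stdint.h>
 *
 *  static bool MyXcptIntercepted(uint32_t uXcptBitmap, uint32_t uPfecMask, uint32_t uPfecMatch,
 *                                uint8_t uVector, uint32_t uErrCode)
 *  {
 *      bool const fBitSet = (uXcptBitmap >> uVector) & 1;
 *      if (uVector != 14) // Every vector other than #PF: the bitmap bit alone decides.
 *          return fBitSet;
 *      // #PF: a VM-exit occurs if the masked error code matches PFEC_MATCH and bit 14
 *      // is set, or if it does not match and bit 14 is clear.
 *      bool const fPfecMatch = (uErrCode & uPfecMask) == uPfecMatch;
 *      return fPfecMatch == fBitSet;
 *  }
 * @endcode
 */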
9991
9992/**
9993 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
9994 * Unconditional VM-exit.
9995 */
9996HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9997{
9998 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9999 return IEMExecVmxVmexitTripleFault(pVCpu);
10000}
10001
10002
10003/**
10004 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
10005 */
10006HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10007{
10008 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10009
10010 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
10011 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10012 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
10013}
10014
10015
10016/**
10017 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
10018 */
10019HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10020{
10021 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10022
10023 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
10024 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10025    return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
10026}
10027
10028
10029/**
10030 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
10031 * Unconditional VM-exit.
10032 */
10033HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10034{
10035 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10036
10037 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10038 | HMVMX_READ_EXIT_INSTR_LEN
10039 | HMVMX_READ_IDT_VECTORING_INFO
10040 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10041
10042 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10043 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10044 pVmxTransient->uIdtVectoringErrorCode);
10045 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
10046}
10047
10048
10049/**
10050 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10051 */
10052HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10053{
10054 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10055
10056 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
10057 {
10058 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10059 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10060 }
10061 return vmxHCExitHlt(pVCpu, pVmxTransient);
10062}
10063
10064
10065/**
10066 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10067 */
10068HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10069{
10070 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10071
10072 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10073 {
10074 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10075 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10076 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10077 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10078 }
10079 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
10080}
10081
10082
10083/**
10084 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10085 */
10086HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10087{
10088 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10089
10090 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
10091 {
10092 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10093 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10094 }
10095 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
10096}
10097
10098
10099/**
10100 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
10101 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
10102 */
10103HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10104{
10105 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10106
10107 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
10108 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
10109
10110 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10111
10112 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
10113 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10114 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10115
10116 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
10117 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
10118 u64VmcsField &= UINT64_C(0xffffffff);
10119
10120 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
10121 {
10122 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10123 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10124 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10125 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10126 }
10127
10128 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
10129 return vmxHCExitVmread(pVCpu, pVmxTransient);
10130 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
10131}
10132
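/*
 * A standalone sketch of the VMREAD/VMWRITE-bitmap rule that
 * CPUMIsGuestVmxVmreadVmwriteInterceptSet applies to the nested VMCS (the
 * function and pbBitmap names here are illustrative): with VMCS shadowing
 * active, the instruction causes a VM-exit if the field operand exceeds 0x7fff
 * (bits 31:15 outside 64-bit mode, hence the truncation above) or if the bit
 * indexed by bits 14:0 of the operand is set in the 4 KB bitmap.
 *
 * @code
 *  #include <stdbool.h>
 *  #include <stdint.h>
 *
 *  static bool MyVmreadVmwriteCausesExit(uint8_t const *pbBitmap, uint64_t u64FieldEnc)
 *  {
 *      if (u64FieldEnc > 0x7fff)                       // Bits beyond 14 set: always exits.
 *          return true;
 *      uint32_t const idxBit = (uint32_t)u64FieldEnc;  // Bits 14:0 index the bitmap.
 *      return (pbBitmap[idxBit / 8] >> (idxBit % 8)) & 1;
 *  }
 * @endcode
 */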
10133
10134/**
10135 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10136 */
10137HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10138{
10139 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10140
10141 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10142 {
10143 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10144 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10145 }
10146
10147 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
10148}
10149
10150
10151/**
10152 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
10153 * Conditional VM-exit.
10154 */
10155HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10156{
10157 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10158
10159 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10160 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10161
10162 VBOXSTRICTRC rcStrict;
10163 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
10164 switch (uAccessType)
10165 {
10166 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
10167 {
10168 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10169 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10170 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10171 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10172
10173 bool fIntercept;
10174 switch (iCrReg)
10175 {
10176 case 0:
10177 case 4:
10178 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
10179 break;
10180
10181 case 3:
10182 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
10183 break;
10184
10185 case 8:
10186 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
10187 break;
10188
10189 default:
10190 fIntercept = false;
10191 break;
10192 }
10193 if (fIntercept)
10194 {
10195 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10196 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10197 }
10198 else
10199 {
10200 int const rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10201 AssertRCReturn(rc, rc);
10202 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10203 }
10204 break;
10205 }
10206
10207 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
10208 {
10209 /*
10210 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
10211 * CR2 reads do not cause a VM-exit.
10212 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
10213 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
10214 */
10215 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10216 if ( iCrReg == 3
10217 || iCrReg == 8)
10218 {
10219 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
10220 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
10221 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
10222 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
10223 {
10224 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10225 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10226 }
10227 else
10228 {
10229 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10230 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10231 }
10232 }
10233 else
10234 {
10235 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
10236 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
10237 }
10238 break;
10239 }
10240
10241 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
10242 {
10243 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
10244 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
10245 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
10246 if ( (uGstHostMask & X86_CR0_TS)
10247 && (uReadShadow & X86_CR0_TS))
10248 {
10249 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10250 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10251 }
10252 else
10253 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
10254 break;
10255 }
10256
10257 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10258 {
10259 RTGCPTR GCPtrEffDst;
10260 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
10261 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
10262 if (fMemOperand)
10263 {
10264 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10265 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
10266 }
10267 else
10268 GCPtrEffDst = NIL_RTGCPTR;
10269
10270 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
10271 {
10272 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10273 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
10274 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10275 }
10276 else
10277 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
10278 break;
10279 }
10280
10281 default:
10282 {
10283 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
10284 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
10285 }
10286 }
10287
10288 if (rcStrict == VINF_IEM_RAISED_XCPT)
10289 {
10290 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10291 rcStrict = VINF_SUCCESS;
10292 }
10293 return rcStrict;
10294}
10295
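/*
 * A standalone sketch of the control-register exit qualification layout decoded
 * above via the VMX_EXIT_QUAL_CRX_* macros (field positions per the Intel SDM;
 * the MYCRXQUAL/MyDecodeCrxQual names are illustrative only):
 *
 * @code
 *  #include <stdint.h>
 *
 *  typedef struct MYCRXQUAL
 *  {
 *      uint8_t  iCrReg;      // Bits 3:0   - control register number.
 *      uint8_t  uAccessType; // Bits 5:4   - 0=MOV to CR, 1=MOV from CR, 2=CLTS, 3=LMSW.
 *      uint8_t  fLmswMem;    // Bit  6     - LMSW operand type (0=register, 1=memory).
 *      uint8_t  iGReg;       // Bits 11:8  - general-purpose register for MOV CRx.
 *      uint16_t uLmswSrc;    // Bits 31:16 - LMSW source data.
 *  } MYCRXQUAL;
 *
 *  static MYCRXQUAL MyDecodeCrxQual(uint64_t uExitQual)
 *  {
 *      MYCRXQUAL Qual;
 *      Qual.iCrReg      = (uint8_t)(  uExitQual        & 0xf);
 *      Qual.uAccessType = (uint8_t)(( uExitQual >>  4) & 0x3);
 *      Qual.fLmswMem    = (uint8_t)(( uExitQual >>  6) & 0x1);
 *      Qual.iGReg       = (uint8_t)(( uExitQual >>  8) & 0xf);
 *      Qual.uLmswSrc    = (uint16_t)((uExitQual >> 16) & 0xffff);
 *      return Qual;
 *  }
 * @endcode
 */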
10296
10297/**
10298 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
10299 * Conditional VM-exit.
10300 */
10301HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10302{
10303 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10304
10305 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
10306 {
10307 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10308 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10309 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10310 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10311 }
10312 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
10313}
10314
10315
10316/**
10317 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
10318 * Conditional VM-exit.
10319 */
10320HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10321{
10322 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10323
10324 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10325
10326 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
10327 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
10328 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
10329
10330 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
10331 uint8_t const cbAccess = s_aIOSizes[uIOSize];
10332 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
10333 {
10334 /*
10335 * IN/OUT instruction:
10336 * - Provides VM-exit instruction length.
10337 *
10338 * INS/OUTS instruction:
10339 * - Provides VM-exit instruction length.
10340 * - Provides Guest-linear address.
10341 * - Optionally provides VM-exit instruction info (depends on CPU feature).
10342 */
10343 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10344 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10345
10346 /* Make sure we don't use stale/uninitialized VMX-transient info. below. */
10347 pVmxTransient->ExitInstrInfo.u = 0;
10348 pVmxTransient->uGuestLinearAddr = 0;
10349
10350 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
10351 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
10352 if (fIOString)
10353 {
10354 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10355 if (fVmxInsOutsInfo)
10356 {
10357 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
10358 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10359 }
10360 }
10361
10362 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
10363 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10364 }
10365 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
10366}
10367
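/*
 * A standalone sketch of the I/O-instruction exit qualification fields feeding
 * the intercept check above (bit layout per the Intel SDM; MyDecodeIoQual is an
 * illustrative name): bits 2:0 encode the access width (0/1/3 for 1, 2 and 4
 * byte accesses, which is what the s_aIOSizes table maps), bit 3 the direction,
 * bit 4 the string-instruction flag and bits 31:16 the port number.
 *
 * @code
 *  #include <stdbool.h>
 *  #include <stdint.h>
 *
 *  static void MyDecodeIoQual(uint64_t uExitQual, uint8_t *pcbAccess, bool *pfIn,
 *                             bool *pfString, uint16_t *puPort)
 *  {
 *      *pcbAccess = (uint8_t)((uExitQual & 0x7) + 1); // 0 -> 1, 1 -> 2, 3 -> 4 bytes.
 *      *pfIn      = (uExitQual >> 3) & 1;             // 0 = OUT/OUTS, 1 = IN/INS.
 *      *pfString  = (uExitQual >> 4) & 1;             // INS/OUTS when set.
 *      *puPort    = (uint16_t)(uExitQual >> 16);      // I/O port number.
 *  }
 * @endcode
 */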
10368
10369/**
10370 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10371 */
10372HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10373{
10374 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10375
10376 uint32_t fMsrpm;
10377 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10378 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10379 else
10380 fMsrpm = VMXMSRPM_EXIT_RD;
10381
10382 if (fMsrpm & VMXMSRPM_EXIT_RD)
10383 {
10384 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10385 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10386 }
10387 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
10388}
10389
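/*
 * A standalone sketch of the 4 KB MSR-bitmap layout behind the
 * CPUMGetVmxMsrPermission lookup above (offsets per the Intel SDM; the
 * MyMsrAccessCausesExit name is illustrative): reads of MSRs 0x0..0x1fff use
 * the bitmap at offset 0x000 and reads of 0xc0000000..0xc0001fff the one at
 * 0x400, with the corresponding write bitmaps at 0x800 and 0xc00; a set bit
 * means the access causes a VM-exit, and MSRs outside both ranges always do.
 *
 * @code
 *  #include <stdbool.h>
 *  #include <stdint.h>
 *
 *  static bool MyMsrAccessCausesExit(uint8_t const *pbMsrBitmap, uint32_t idMsr, bool fWrite)
 *  {
 *      uint32_t offBitmap = fWrite ? 0x800 : 0x000;
 *      uint32_t idxMsr;
 *      if (idMsr <= UINT32_C(0x1fff))                             // Low MSR range.
 *          idxMsr = idMsr;
 *      else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff)) // High MSR range.
 *      {
 *          offBitmap += 0x400;
 *          idxMsr     = idMsr - UINT32_C(0xc0000000);
 *      }
 *      else
 *          return true;                                           // Outside both ranges.
 *      return (pbMsrBitmap[offBitmap + idxMsr / 8] >> (idxMsr % 8)) & 1;
 *  }
 * @endcode
 */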
10390
10391/**
10392 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10393 */
10394HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10395{
10396 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10397
10398 uint32_t fMsrpm;
10399 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10400 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10401 else
10402 fMsrpm = VMXMSRPM_EXIT_WR;
10403
10404 if (fMsrpm & VMXMSRPM_EXIT_WR)
10405 {
10406 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10407 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10408 }
10409 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
10410}
10411
10412
10413/**
10414 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10415 */
10416HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10417{
10418 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10419
10420 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
10421 {
10422 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10423 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10424 }
10425 return vmxHCExitMwait(pVCpu, pVmxTransient);
10426}
10427
10428
10429/**
10430 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
10431 * VM-exit.
10432 */
10433HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10434{
10435 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10436
10437 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
10438 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10439 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10440 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10441}
10442
10443
10444/**
10445 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10446 */
10447HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10448{
10449 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10450
10451 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
10452 {
10453 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10454 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10455 }
10456 return vmxHCExitMonitor(pVCpu, pVmxTransient);
10457}
10458
10459
10460/**
10461 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10462 */
10463HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10464{
10465 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10466
10467 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10468 * PAUSE when executing a nested-guest? If it does not, we would not need
10469 * to check for the intercepts here. Just call VM-exit... */
10470
10471 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10472 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10473 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10474 {
10475 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10476 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10477 }
10478 return vmxHCExitPause(pVCpu, pVmxTransient);
10479}
10480
10481
10482/**
10483 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10484 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10485 */
10486HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10487{
10488 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10489
10490 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10491 {
10492 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10493 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10494 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10495 }
10496 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10497}
10498
10499
10500/**
10501 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10502 * VM-exit.
10503 */
10504HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10505{
10506 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10507
10508 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10509 | HMVMX_READ_EXIT_INSTR_LEN
10510 | HMVMX_READ_IDT_VECTORING_INFO
10511 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10512
10513 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10514
10515 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10516 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10517
10518 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10519 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10520 pVmxTransient->uIdtVectoringErrorCode);
10521 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10522}
10523
10524
10525/**
10526 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10527 * Conditional VM-exit.
10528 */
10529HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10530{
10531 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10532
10533 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10534 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10535 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10536}
10537
10538
10539/**
10540 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10541 * Conditional VM-exit.
10542 */
10543HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10544{
10545 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10546
10547 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10548 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10549 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10550}
10551
10552
10553/**
10554 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10555 */
10556HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10557{
10558 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10559
10560 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10561 {
10562 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10563 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10564 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10565 }
10566 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10567}
10568
10569
10570/**
10571 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10572 */
10573HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10574{
10575 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10576
10577 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10578 {
10579 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10580 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10581 }
10582 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10583}
10584
10585
10586/**
10587 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10588 */
10589HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10590{
10591 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10592
10593 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10594 {
10595 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10596 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10597 | HMVMX_READ_EXIT_INSTR_INFO
10598 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10599 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10600 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10601 }
10602 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10603}
10604
10605
10606/**
10607 * Nested-guest VM-exit handler for invalid-guest state
10608 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10609 */
10610HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10611{
10612 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10613
10614 /*
10615 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10616     * So if it does happen, it indicates a bug, possibly in the hardware-assisted VMX code.
10617     * Handle it as if the outer guest were in an invalid guest state.
10618 *
10619 * When the fast path is implemented, this should be changed to cause the corresponding
10620 * nested-guest VM-exit.
10621 */
10622 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10623}
10624
10625
10626/**
10627 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10628 * and only provide the instruction length.
10629 *
10630 * Unconditional VM-exit.
10631 */
10632HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10633{
10634 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10635
10636#ifdef VBOX_STRICT
10637 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10638 switch (pVmxTransient->uExitReason)
10639 {
10640 case VMX_EXIT_ENCLS:
10641 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10642 break;
10643
10644 case VMX_EXIT_VMFUNC:
10645 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10646 break;
10647 }
10648#endif
10649
10650 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10651 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10652}
10653
10654
10655/**
10656 * Nested-guest VM-exit handler for instructions that provide instruction length as
10657 * well as more information.
10658 *
10659 * Unconditional VM-exit.
10660 */
10661HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10662{
10663 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10664
10665# ifdef VBOX_STRICT
10666 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10667 switch (pVmxTransient->uExitReason)
10668 {
10669 case VMX_EXIT_GDTR_IDTR_ACCESS:
10670 case VMX_EXIT_LDTR_TR_ACCESS:
10671 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10672 break;
10673
10674 case VMX_EXIT_RDRAND:
10675 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10676 break;
10677
10678 case VMX_EXIT_RDSEED:
10679 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10680 break;
10681
10682 case VMX_EXIT_XSAVES:
10683 case VMX_EXIT_XRSTORS:
10684 /** @todo NSTVMX: Verify XSS-bitmap. */
10685 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10686 break;
10687
10688 case VMX_EXIT_UMWAIT:
10689 case VMX_EXIT_TPAUSE:
10690 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10691 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10692 break;
10693
10694 case VMX_EXIT_LOADIWKEY:
10695 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10696 break;
10697 }
10698# endif
10699
10700 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10701 | HMVMX_READ_EXIT_INSTR_LEN
10702 | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10703 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10704 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10705}
10706
10707# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10708
10709/**
10710 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10711 * Conditional VM-exit.
10712 */
10713HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10714{
10715 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10716 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10717
10718 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10719 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10720 {
10721 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10722 | HMVMX_READ_EXIT_INSTR_LEN
10723 | HMVMX_READ_EXIT_INTERRUPTION_INFO
10724 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10725 | HMVMX_READ_IDT_VECTORING_INFO
10726 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
10727 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10728 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10729 AssertRCReturn(rc, rc);
10730
10731 /*
10732         * If this VM-exit is ours to handle, we're responsible for re-injecting any event whose
10733         * delivery might have triggered it. If we forward the VM-exit to the inner VMM instead,
10734         * dealing with the event becomes its problem and we'll clear the recovered event below.
10735 */
10736 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10737 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10738 { /*likely*/ }
10739 else
10740 {
10741 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
10742 return rcStrict;
10743 }
10744 bool const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* paranoia. should not inject events below. */
10745
10746 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10747 uint64_t const uExitQual = pVmxTransient->uExitQual;
10748
10749 RTGCPTR GCPtrNestedFault;
10750 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10751 if (fIsLinearAddrValid)
10752 {
10753 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10754 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
10755 }
10756 else
10757 GCPtrNestedFault = 0;
10758
10759 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10760 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10761 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10762 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10763 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10764
10765 PGMPTWALK Walk;
10766 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10767 rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, CPUMCTX2CORE(pCtx),
10768 GCPhysNestedFault, fIsLinearAddrValid, GCPtrNestedFault,
10769 &Walk);
10770 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
10771 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
10772 if (RT_SUCCESS(rcStrict))
10773 return rcStrict;
10774
10775 if (fClearEventOnForward)
10776 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
10777
10778 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10779 pVmxTransient->uIdtVectoringErrorCode);
10780 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10781 {
10782 VMXVEXITINFO const ExitInfo
10783 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
10784 pVmxTransient->uExitQual,
10785 pVmxTransient->cbExitInstr,
10786 pVmxTransient->uGuestLinearAddr,
10787 pVmxTransient->uGuestPhysicalAddr);
10788 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10789 }
10790
10791 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10792 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10793 }
10794
10795 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10796}
10797
10798
10799/**
10800 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10801 * Conditional VM-exit.
10802 */
10803HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10804{
10805 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10806 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10807
10808 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10809 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10810 {
10811 vmxHCReadToTransient<HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10812 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10813 AssertRCReturn(rc, rc);
10814
10815 PGMPTWALK Walk;
10816 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10817 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10818 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, CPUMCTX2CORE(pCtx),
10819 GCPhysNestedFault, false /* fIsLinearAddrValid */,
10820 0 /* GCPtrNestedFault */, &Walk);
10821 if (RT_SUCCESS(rcStrict))
10822 {
10823 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
10824 return rcStrict;
10825 }
10826
10827 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
10828 vmxHCReadToTransient< HMVMX_READ_IDT_VECTORING_INFO
10829 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10830
10831 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10832 pVmxTransient->uIdtVectoringErrorCode);
10833 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10834 }
10835
10836 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
10837}
10838
10839# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10840
10841/** @} */
10842#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10843
10844
10845/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10846 * probes.
10847 *
10848 * The following few functions and associated structure contain the bloat
10849 * necessary for providing detailed debug events and dtrace probes as well as
10850 * reliable host side single stepping. This works on the principle of
10851 * "subclassing" the normal execution loop and workers. We replace the loop
10852 * method completely and override selected helpers to add necessary adjustments
10853 * to their core operation.
10854 *
10855 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10856 * any performance for debug and analysis features.
10857 *
10858 * @{
10859 */
10860
10861/**
10862 * Transient per-VCPU debug state of the VMCS and related info that we save/restore
10863 * in the debug run loop.
10864 */
10865typedef struct VMXRUNDBGSTATE
10866{
10867 /** The RIP we started executing at. This is for detecting that we stepped. */
10868 uint64_t uRipStart;
10869 /** The CS we started executing with. */
10870 uint16_t uCsStart;
10871
10872 /** Whether we've actually modified the 1st execution control field. */
10873 bool fModifiedProcCtls : 1;
10874 /** Whether we've actually modified the 2nd execution control field. */
10875 bool fModifiedProcCtls2 : 1;
10876 /** Whether we've actually modified the exception bitmap. */
10877 bool fModifiedXcptBitmap : 1;
10878
10879    /** We want the CR0 guest/host mask to be cleared. */
10880 bool fClearCr0Mask : 1;
10881    /** We want the CR4 guest/host mask to be cleared. */
10882 bool fClearCr4Mask : 1;
10883 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10884 uint32_t fCpe1Extra;
10885 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10886 uint32_t fCpe1Unwanted;
10887 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10888 uint32_t fCpe2Extra;
10889 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10890 uint32_t bmXcptExtra;
10891 /** The sequence number of the Dtrace provider settings the state was
10892 * configured against. */
10893 uint32_t uDtraceSettingsSeqNo;
10894 /** VM-exits to check (one bit per VM-exit). */
10895 uint32_t bmExitsToCheck[3];
10896
10897 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10898 uint32_t fProcCtlsInitial;
10899 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10900 uint32_t fProcCtls2Initial;
10901 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10902 uint32_t bmXcptInitial;
10903} VMXRUNDBGSTATE;
10904AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
10905typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
10906
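/*
 * A standalone sketch of the sizing rule that the AssertCompileMemberSize above
 * verifies for bmExitsToCheck: one bit per VM-exit reason, rounded up to whole
 * 32-bit words (MY_EXIT_MAX and the two helpers are illustrative stand-ins for
 * VMX_EXIT_MAX and the IPRT ASMBitSet/ASMBitTest routines used on the real
 * bitmap).
 *
 * @code
 *  #include <stdbool.h>
 *  #include <stdint.h>
 *
 *  #define MY_EXIT_MAX  68  // Highest exit reason of interest (example value).
 *
 *  typedef struct MYEXITBITMAP
 *  {
 *      uint32_t au32[(MY_EXIT_MAX + 1 + 31) / 32]; // Same rounding as bmExitsToCheck.
 *  } MYEXITBITMAP;
 *
 *  static void MyExitBitmapSet(MYEXITBITMAP *pBm, unsigned uExitReason)
 *  {
 *      pBm->au32[uExitReason / 32] |= UINT32_C(1) << (uExitReason % 32);
 *  }
 *
 *  static bool MyExitBitmapIsSet(MYEXITBITMAP const *pBm, unsigned uExitReason)
 *  {
 *      return (pBm->au32[uExitReason / 32] >> (uExitReason % 32)) & 1;
 *  }
 * @endcode
 */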
10907
10908/**
10909 * Initializes the VMXRUNDBGSTATE structure.
10910 *
10911 * @param pVCpu The cross context virtual CPU structure of the
10912 * calling EMT.
10913 * @param pVmxTransient The VMX-transient structure.
10914 * @param pDbgState The debug state to initialize.
10915 */
10916static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10917{
10918 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
10919 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
10920
10921 pDbgState->fModifiedProcCtls = false;
10922 pDbgState->fModifiedProcCtls2 = false;
10923 pDbgState->fModifiedXcptBitmap = false;
10924 pDbgState->fClearCr0Mask = false;
10925 pDbgState->fClearCr4Mask = false;
10926 pDbgState->fCpe1Extra = 0;
10927 pDbgState->fCpe1Unwanted = 0;
10928 pDbgState->fCpe2Extra = 0;
10929 pDbgState->bmXcptExtra = 0;
10930 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
10931 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
10932 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
10933}
10934
10935
10936/**
10937 * Updates the VMCS fields with changes requested by @a pDbgState.
10938 *
10939 * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
10940 * immediately before executing guest code, i.e. when interrupts are disabled.
10941 * We don't check status codes here as we cannot easily assert or return in the
10942 * latter case.
10943 *
10944 * @param pVCpu The cross context virtual CPU structure.
10945 * @param pVmxTransient The VMX-transient structure.
10946 * @param pDbgState The debug state.
10947 */
10948static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10949{
10950 /*
10951 * Ensure desired flags in VMCS control fields are set.
10952 * (Ignoring write failure here, as we're committed and it's just debug extras.)
10953 *
10954 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
10955 * there should be no stale data in pCtx at this point.
10956 */
10957 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10958 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
10959 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
10960 {
10961 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
10962 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
10963 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
10964 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
10965 pDbgState->fModifiedProcCtls = true;
10966 }
10967
10968 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
10969 {
10970 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
10971 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
10972 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
10973 pDbgState->fModifiedProcCtls2 = true;
10974 }
10975
10976 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
10977 {
10978 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
10979 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
10980 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
10981 pDbgState->fModifiedXcptBitmap = true;
10982 }
10983
10984 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
10985 {
10986 pVmcsInfo->u64Cr0Mask = 0;
10987 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
10988 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
10989 }
10990
10991 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
10992 {
10993 pVmcsInfo->u64Cr4Mask = 0;
10994 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
10995 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
10996 }
10997
10998 NOREF(pVCpu);
10999}
11000
11001
11002/**
11003 * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
11004 * re-entry next time around.
11005 *
11006 * @returns Strict VBox status code (i.e. informational status codes too).
11007 * @param pVCpu The cross context virtual CPU structure.
11008 * @param pVmxTransient The VMX-transient structure.
11009 * @param pDbgState The debug state.
11010 * @param rcStrict The return code from executing the guest using single
11011 * stepping.
11012 */
11013static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
11014 VBOXSTRICTRC rcStrict)
11015{
11016 /*
11017 * Restore VM-exit control settings as we may not reenter this function the
11018 * next time around.
11019 */
11020 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11021
11022    /* We reload the initial value and trigger what recalculations we can the
11023       next time around. From the looks of things, that's all that's required atm. */
11024 if (pDbgState->fModifiedProcCtls)
11025 {
11026 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
11027 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
11028 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
11029 AssertRC(rc2);
11030 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
11031 }
11032
11033 /* We're currently the only ones messing with this one, so just restore the
11034 cached value and reload the field. */
11035 if ( pDbgState->fModifiedProcCtls2
11036 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
11037 {
11038 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
11039 AssertRC(rc2);
11040 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
11041 }
11042
11043 /* If we've modified the exception bitmap, we restore it and trigger
11044 reloading and partial recalculation the next time around. */
11045 if (pDbgState->fModifiedXcptBitmap)
11046 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
11047
11048 return rcStrict;
11049}
11050
11051
11052/**
11053 * Configures VM-exit controls for current DBGF and DTrace settings.
11054 *
11055 * This updates @a pDbgState and the VMCS execution control fields to reflect
11056 * the necessary VM-exits demanded by DBGF and DTrace.
11057 *
11058 * @param pVCpu The cross context virtual CPU structure.
11059 * @param pVmxTransient The VMX-transient structure. May update
11060 * fUpdatedTscOffsettingAndPreemptTimer.
11061 * @param pDbgState The debug state.
11062 */
11063static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11064{
11065#ifndef IN_NEM_DARWIN
11066 /*
11067 * Take down the dtrace serial number so we can spot changes.
11068 */
11069 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
11070 ASMCompilerBarrier();
11071#endif
11072
11073 /*
11074 * We'll rebuild most of the middle block of data members (holding the
11075 * current settings) as we go along here, so start by clearing it all.
11076 */
11077 pDbgState->bmXcptExtra = 0;
11078 pDbgState->fCpe1Extra = 0;
11079 pDbgState->fCpe1Unwanted = 0;
11080 pDbgState->fCpe2Extra = 0;
11081 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
11082 pDbgState->bmExitsToCheck[i] = 0;
11083
11084 /*
11085 * Software interrupts (INT XXh) - no idea how to trigger these...
11086 */
11087 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11088 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
11089 || VBOXVMM_INT_SOFTWARE_ENABLED())
11090 {
11091 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11092 }
11093
11094 /*
11095 * INT3 breakpoints - triggered by #BP exceptions.
11096 */
11097 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
11098 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11099
11100 /*
11101 * Exception bitmap and XCPT events+probes.
11102 */
11103 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
11104 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
11105 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
11106
11107 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
11108 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
11109 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11110 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
11111 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
11112 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
11113 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
11114 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
11115 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
11116 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
11117 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
11118 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
11119 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
11120 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
11121 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
11122 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
11123 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
11124 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
11125
11126 if (pDbgState->bmXcptExtra)
11127 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11128
11129 /*
11130 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
11131 *
11132 * Note! This is the reverse of what vmxHCHandleExitDtraceEvents does.
11133 * So, when adding/changing/removing please don't forget to update it.
11134 *
11135 * Some of the macros pick up local variables to save horizontal space
11136 * (being able to see it in a table is the lesser evil here).
11137 */
11138#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
11139 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
11140 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
11141#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
11142 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11143 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11144 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11145 } else do { } while (0)
11146#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
11147 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11148 { \
11149 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
11150 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11151 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11152 } else do { } while (0)
11153#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
11154 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11155 { \
11156 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
11157 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11158 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11159 } else do { } while (0)
11160#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
11161 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11162 { \
11163 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
11164 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11165 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11166 } else do { } while (0)
11167
11168 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
11169 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
11170 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
11171 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
11172 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
11173
11174 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
11175 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
11176 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
11177 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
11178 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
11179 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
11180 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
11181 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
11182 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
11183 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
11184 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
11185 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
11186 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
11187 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
11188 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
11189 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
11190 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
11191 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
11192 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
11193 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
11194 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
11195 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
11196 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
11197 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
11198 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
11199 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
11200 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
11201 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
11202 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
11203 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
11204 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
11205 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
11206 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
11207 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
11208 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
11209 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
11210
11211 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
11212 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11213 {
11214 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo,
11215 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
11216 AssertRC(rc);
11217
11218#if 0 /** @todo fix me */
11219 pDbgState->fClearCr0Mask = true;
11220 pDbgState->fClearCr4Mask = true;
11221#endif
11222 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
11223 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
11224 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11225 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
11226 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
11227 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
11228 require clearing here and in the loop if we start using it. */
11229 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
11230 }
11231 else
11232 {
11233 if (pDbgState->fClearCr0Mask)
11234 {
11235 pDbgState->fClearCr0Mask = false;
11236 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
11237 }
11238 if (pDbgState->fClearCr4Mask)
11239 {
11240 pDbgState->fClearCr4Mask = false;
11241 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
11242 }
11243 }
11244 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
11245 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
11246
11247 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
11248 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
11249 {
11250 /** @todo later, need to fix handler as it assumes this won't usually happen. */
11251 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
11252 }
11253 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
11254 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
11255
11256 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
11257 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
11258 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
11259 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
11260 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
11261 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
11262 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
11263 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
11264#if 0 /** @todo too slow, fix handler. */
11265 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
11266#endif
11267 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
11268
11269 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
11270 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
11271 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
11272 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
11273 {
11274 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11275 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
11276 }
11277 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11278 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11279 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11280 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11281
11282 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
11283 || IS_EITHER_ENABLED(pVM, INSTR_STR)
11284 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
11285 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
11286 {
11287 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11288 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
11289 }
11290 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
11291 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
11292 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
11293 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
11294
11295 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
11296 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
11297 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
11298 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
11299 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
11300 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
11301 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
11302 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
11303 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
11304 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
11305 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
11306 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
11307 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
11308 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
11309 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
11310 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
11311 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
11312 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
11313 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
11314     SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
11315 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
11316 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
11317
11318#undef IS_EITHER_ENABLED
11319#undef SET_ONLY_XBM_IF_EITHER_EN
11320#undef SET_CPE1_XBM_IF_EITHER_EN
11321#undef SET_CPEU_XBM_IF_EITHER_EN
11322#undef SET_CPE2_XBM_IF_EITHER_EN
11323
11324 /*
11325 * Sanitize the control stuff.
11326 */
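    /* Mask the requested extras down to the allowed-1 bits the CPU reports and only
       attempt to drop controls which the allowed-0 bits permit to be clear. */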
11327 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
11328 if (pDbgState->fCpe2Extra)
11329 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
11330 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
11331 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
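    /* If the RDTSC-exiting preference changed, force the TSC offsetting and
       preemption timer setup to be redone before the next VM-entry. */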
11332#ifndef IN_NEM_DARWIN
11333 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11334 {
11335 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
11336 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11337 }
11338#else
11339 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11340 {
11341 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
11342 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11343 }
11344#endif
11345
11346 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
11347 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
11348 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
11349 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
11350}
11351
11352
11353/**
11354 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
11355 * appropriate.
11356 *
11357 * The caller has checked the VM-exit against the
11358 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs
11359 * already, so we don't have to do that either.
11360 *
11361 * @returns Strict VBox status code (i.e. informational status codes too).
11362 * @param pVCpu The cross context virtual CPU structure.
11363 * @param pVmxTransient The VMX-transient structure.
11364 * @param uExitReason The VM-exit reason.
11365 *
11366 * @remarks The name of this function is displayed by dtrace, so keep it short
11367 *          and to the point. No longer than 33 chars, please.
11368 */
11369static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11370{
11371 /*
11372 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11373 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11374 *
11375 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
11376     * does. Must add/change/remove in both places. Same ordering, please.
11377 *
11378 * Added/removed events must also be reflected in the next section
11379 * where we dispatch dtrace events.
11380 */
11381 bool fDtrace1 = false;
11382 bool fDtrace2 = false;
11383 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11384 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11385 uint32_t uEventArg = 0;
11386#define SET_EXIT(a_EventSubName) \
11387 do { \
11388 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11389 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11390 } while (0)
11391#define SET_BOTH(a_EventSubName) \
11392 do { \
11393 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11394 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11395 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11396 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11397 } while (0)
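    /* SET_EXIT() maps an exit-only DBGF event; SET_BOTH() additionally maps the
       corresponding instruction event.  Both sample whether the matching dtrace
       probes are currently armed. */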
11398 switch (uExitReason)
11399 {
11400 case VMX_EXIT_MTF:
11401 return vmxHCExitMtf(pVCpu, pVmxTransient);
11402
11403 case VMX_EXIT_XCPT_OR_NMI:
11404 {
11405 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11406 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11407 {
11408 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11409 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11410 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11411 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11412 {
11413 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11414 {
11415 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE>(pVCpu, pVmxTransient);
11416 uEventArg = pVmxTransient->uExitIntErrorCode;
11417 }
11418 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11419 switch (enmEvent1)
11420 {
11421 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11422 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11423 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11424 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11425 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11426 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11427 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11428 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11429 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11430 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11431 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11432 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11433 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11434 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11435 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11436 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11437 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11438 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11439 default: break;
11440 }
11441 }
11442 else
11443 AssertFailed();
11444 break;
11445
11446 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11447 uEventArg = idxVector;
11448 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11449 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11450 break;
11451 }
11452 break;
11453 }
11454
11455 case VMX_EXIT_TRIPLE_FAULT:
11456 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11457 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11458 break;
11459 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11460 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11461 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11462 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11463 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11464
11465 /* Instruction specific VM-exits: */
11466 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11467 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11468 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11469 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11470 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11471 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11472 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11473 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11474 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11475 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11476 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11477 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11478 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11479 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11480 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11481 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11482 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11483 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11484 case VMX_EXIT_MOV_CRX:
11485 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11486 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11487 SET_BOTH(CRX_READ);
11488 else
11489 SET_BOTH(CRX_WRITE);
11490 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11491 break;
11492 case VMX_EXIT_MOV_DRX:
11493 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11494 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11495 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11496 SET_BOTH(DRX_READ);
11497 else
11498 SET_BOTH(DRX_WRITE);
11499 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11500 break;
11501 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11502 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11503 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11504 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11505 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11506 case VMX_EXIT_GDTR_IDTR_ACCESS:
11507 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11508 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11509 {
11510 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11511 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11512 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11513 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11514 }
11515 break;
11516
11517 case VMX_EXIT_LDTR_TR_ACCESS:
11518 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11519 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11520 {
11521 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11522 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11523 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11524 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11525 }
11526 break;
11527
11528 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11529 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11530 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11531 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11532 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11533 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11534 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11535 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11536 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11537 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11538 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11539
11540 /* Events that aren't relevant at this point. */
11541 case VMX_EXIT_EXT_INT:
11542 case VMX_EXIT_INT_WINDOW:
11543 case VMX_EXIT_NMI_WINDOW:
11544 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11545 case VMX_EXIT_PREEMPT_TIMER:
11546 case VMX_EXIT_IO_INSTR:
11547 break;
11548
11549 /* Errors and unexpected events. */
11550 case VMX_EXIT_INIT_SIGNAL:
11551 case VMX_EXIT_SIPI:
11552 case VMX_EXIT_IO_SMI:
11553 case VMX_EXIT_SMI:
11554 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11555 case VMX_EXIT_ERR_MSR_LOAD:
11556 case VMX_EXIT_ERR_MACHINE_CHECK:
11557 case VMX_EXIT_PML_FULL:
11558 case VMX_EXIT_VIRTUALIZED_EOI:
11559 break;
11560
11561 default:
11562 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11563 break;
11564 }
11565#undef SET_BOTH
11566#undef SET_EXIT
11567
11568 /*
11569     * Dtrace tracepoints go first.  We do them all here so we don't have to
11570     * duplicate the guest state saving and related code a few dozen times.
11571     * The downside is that we've got to repeat the switch, though this time
11572 * we use enmEvent since the probes are a subset of what DBGF does.
11573 */
11574 if (fDtrace1 || fDtrace2)
11575 {
11576 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11577 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11578 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11579 switch (enmEvent1)
11580 {
11581 /** @todo consider which extra parameters would be helpful for each probe. */
11582 case DBGFEVENT_END: break;
11583 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11584 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11585 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11586 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11587 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11588 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11589 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11590 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11591 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11592 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11593 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11594 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11595 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11596 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11597 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11598 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11599 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11600 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11601 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11602 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11603 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11604 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11605 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11606 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11607 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11608 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11609 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11610 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11611 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11612 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11613 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11614 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11615 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11616 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11617 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11618 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11619 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11620 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11621 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11622 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11623 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11624 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11625 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11626 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11627 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11628 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11629 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11630 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11631 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11632 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11633 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11634 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11635 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11636 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11637 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11638 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11639 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11640 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11641 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11642 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11643 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11644 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11645 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11646 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11647 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11648 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11649 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11650 }
11651 switch (enmEvent2)
11652 {
11653 /** @todo consider which extra parameters would be helpful for each probe. */
11654 case DBGFEVENT_END: break;
11655 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11656 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11657 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11658 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11659 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11660 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11661 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11662 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11663 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11664 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11665 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11666 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11667 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11668 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11669 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11670 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11671 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11672 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11673 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11674 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11675 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11676 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11677 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11678 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11679 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11680 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11681 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11682 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11683 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11684 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11685 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11686 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11687 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11688 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11689 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11690 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11691 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11692 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11693 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11694 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11695 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11696 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11697 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11698 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11699 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11700 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11701 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11702 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11703 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11704 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11705 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11706 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11707 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11708 }
11709 }
11710
11711 /*
11712     * Fire off the DBGF event, if enabled (our check here is just a quick one,
11713 * the DBGF call will do a full check).
11714 *
11715 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11716     * Note! If we have two events, we prioritize the first, i.e. the instruction
11717 * one, in order to avoid event nesting.
11718 */
11719 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11720 if ( enmEvent1 != DBGFEVENT_END
11721 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11722 {
11723 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11724 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11725 if (rcStrict != VINF_SUCCESS)
11726 return rcStrict;
11727 }
11728 else if ( enmEvent2 != DBGFEVENT_END
11729 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11730 {
11731 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11732 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11733 if (rcStrict != VINF_SUCCESS)
11734 return rcStrict;
11735 }
11736
11737 return VINF_SUCCESS;
11738}
11739
11740
11741/**
11742 * Single-stepping VM-exit filtering.
11743 *
11744 * This is preprocessing the VM-exits and deciding whether we've gotten far
11745 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11746 * handling is performed.
11747 *
11748 * @returns Strict VBox status code (i.e. informational status codes too).
11749 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11750 * @param pVmxTransient The VMX-transient structure.
11751 * @param pDbgState The debug state.
11752 */
11753DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11754{
11755 /*
11756 * Expensive (saves context) generic dtrace VM-exit probe.
11757 */
11758 uint32_t const uExitReason = pVmxTransient->uExitReason;
11759 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11760 { /* more likely */ }
11761 else
11762 {
11763 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11764 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11765 AssertRC(rc);
11766 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11767 }
11768
11769#ifndef IN_NEM_DARWIN
11770 /*
11771 * Check for host NMI, just to get that out of the way.
11772 */
11773 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11774 { /* normally likely */ }
11775 else
11776 {
11777 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
11778 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11779 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11780 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11781 }
11782#endif
11783
11784 /*
11785 * Check for single stepping event if we're stepping.
11786 */
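    /* Event-like and instruction-specific exits below share one check: compare
       RIP/CS against the values recorded when stepping started and return
       VINF_EM_DBG_STEPPED once they differ.  Error and unexpected exits fall
       through to normal handling. */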
11787 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11788 {
11789 switch (uExitReason)
11790 {
11791 case VMX_EXIT_MTF:
11792 return vmxHCExitMtf(pVCpu, pVmxTransient);
11793
11794 /* Various events: */
11795 case VMX_EXIT_XCPT_OR_NMI:
11796 case VMX_EXIT_EXT_INT:
11797 case VMX_EXIT_TRIPLE_FAULT:
11798 case VMX_EXIT_INT_WINDOW:
11799 case VMX_EXIT_NMI_WINDOW:
11800 case VMX_EXIT_TASK_SWITCH:
11801 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11802 case VMX_EXIT_APIC_ACCESS:
11803 case VMX_EXIT_EPT_VIOLATION:
11804 case VMX_EXIT_EPT_MISCONFIG:
11805 case VMX_EXIT_PREEMPT_TIMER:
11806
11807 /* Instruction specific VM-exits: */
11808 case VMX_EXIT_CPUID:
11809 case VMX_EXIT_GETSEC:
11810 case VMX_EXIT_HLT:
11811 case VMX_EXIT_INVD:
11812 case VMX_EXIT_INVLPG:
11813 case VMX_EXIT_RDPMC:
11814 case VMX_EXIT_RDTSC:
11815 case VMX_EXIT_RSM:
11816 case VMX_EXIT_VMCALL:
11817 case VMX_EXIT_VMCLEAR:
11818 case VMX_EXIT_VMLAUNCH:
11819 case VMX_EXIT_VMPTRLD:
11820 case VMX_EXIT_VMPTRST:
11821 case VMX_EXIT_VMREAD:
11822 case VMX_EXIT_VMRESUME:
11823 case VMX_EXIT_VMWRITE:
11824 case VMX_EXIT_VMXOFF:
11825 case VMX_EXIT_VMXON:
11826 case VMX_EXIT_MOV_CRX:
11827 case VMX_EXIT_MOV_DRX:
11828 case VMX_EXIT_IO_INSTR:
11829 case VMX_EXIT_RDMSR:
11830 case VMX_EXIT_WRMSR:
11831 case VMX_EXIT_MWAIT:
11832 case VMX_EXIT_MONITOR:
11833 case VMX_EXIT_PAUSE:
11834 case VMX_EXIT_GDTR_IDTR_ACCESS:
11835 case VMX_EXIT_LDTR_TR_ACCESS:
11836 case VMX_EXIT_INVEPT:
11837 case VMX_EXIT_RDTSCP:
11838 case VMX_EXIT_INVVPID:
11839 case VMX_EXIT_WBINVD:
11840 case VMX_EXIT_XSETBV:
11841 case VMX_EXIT_RDRAND:
11842 case VMX_EXIT_INVPCID:
11843 case VMX_EXIT_VMFUNC:
11844 case VMX_EXIT_RDSEED:
11845 case VMX_EXIT_XSAVES:
11846 case VMX_EXIT_XRSTORS:
11847 {
11848 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11849 AssertRCReturn(rc, rc);
11850 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11851 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11852 return VINF_EM_DBG_STEPPED;
11853 break;
11854 }
11855
11856 /* Errors and unexpected events: */
11857 case VMX_EXIT_INIT_SIGNAL:
11858 case VMX_EXIT_SIPI:
11859 case VMX_EXIT_IO_SMI:
11860 case VMX_EXIT_SMI:
11861 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11862 case VMX_EXIT_ERR_MSR_LOAD:
11863 case VMX_EXIT_ERR_MACHINE_CHECK:
11864 case VMX_EXIT_PML_FULL:
11865 case VMX_EXIT_VIRTUALIZED_EOI:
11866             case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault-like, so I guess we must process it? */
11867 break;
11868
11869 default:
11870 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11871 break;
11872 }
11873 }
11874
11875 /*
11876 * Check for debugger event breakpoints and dtrace probes.
11877 */
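    /* bmExitsToCheck was armed by the debug state update above; the handler below
       does the fine-grained DBGF/dtrace filtering for those exits. */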
11878 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11879 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11880 {
11881 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11882 if (rcStrict != VINF_SUCCESS)
11883 return rcStrict;
11884 }
11885
11886 /*
11887 * Normal processing.
11888 */
11889#ifdef HMVMX_USE_FUNCTION_TABLE
11890 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11891#else
11892 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11893#endif
11894}
11895
11896/** @} */