VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@ 97083

Last change on this file since 97083 was 97083, checked in by vboxsync, 2 years ago

VMM/HMVMXR0: Some vmxHCImportGuestStateInner tweaking, reducing code size by about 1K.

1/* $Id: VMXAllTemplate.cpp.h 97083 2022-10-11 00:00:35Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
39#endif
40
41/** Enables condensing of VMREAD instructions, see vmxHCReadToTransient(). */
42#define HMVMX_WITH_CONDENSED_VMREADS
43
44/** Use the function table. */
45#define HMVMX_USE_FUNCTION_TABLE
46
47/** Determine which tagged-TLB flush handler to use. */
48#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
49#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
50#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
51#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
52
53/** Assert that all the given fields have been read from the VMCS. */
54#ifdef VBOX_STRICT
55# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
56 do { \
57 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead); \
58 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
59 } while (0)
60#else
61# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
62#endif
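/* Editor's illustrative note (not part of the original file): a strict-build exit
 * handler that consumes, say, the exit qualification and instruction length would
 * pair the condensed read with a matching assertion, roughly:
 *
 *     vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
 *     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN);
 *
 * so VBOX_STRICT builds catch handlers that touch pVmxTransient fields which were
 * never loaded from the VMCS. */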
63
64/**
65 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
66 * guest using hardware-assisted VMX.
67 *
68 * This excludes state like GPRs (other than RSP) which are always
69 * swapped and restored across the world-switch, and also registers like the EFER
70 * MSR which cannot be modified by the guest without causing a VM-exit.
71 */
72#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
73 | CPUMCTX_EXTRN_RFLAGS \
74 | CPUMCTX_EXTRN_RSP \
75 | CPUMCTX_EXTRN_SREG_MASK \
76 | CPUMCTX_EXTRN_TABLE_MASK \
77 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
78 | CPUMCTX_EXTRN_SYSCALL_MSRS \
79 | CPUMCTX_EXTRN_SYSENTER_MSRS \
80 | CPUMCTX_EXTRN_TSC_AUX \
81 | CPUMCTX_EXTRN_OTHER_MSRS \
82 | CPUMCTX_EXTRN_CR0 \
83 | CPUMCTX_EXTRN_CR3 \
84 | CPUMCTX_EXTRN_CR4 \
85 | CPUMCTX_EXTRN_DR7 \
86 | CPUMCTX_EXTRN_HWVIRT \
87 | CPUMCTX_EXTRN_INHIBIT_INT \
88 | CPUMCTX_EXTRN_INHIBIT_NMI)
89
90/**
91 * Exception bitmap mask for real-mode guests (real-on-v86).
92 *
93 * We need to intercept all exceptions manually, with two notes:
94 * - \#AC and \#DB are always intercepted anyway to prevent the CPU from deadlocking
95 * due to bugs in Intel CPUs, so they are not part of this mask.
96 * - \#PF need not be intercepted even in real-mode if we have nested paging
97 * support.
98 */
99#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
100 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
101 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
102 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
103 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
104 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
105 | RT_BIT(X86_XCPT_XF))
106
107/** Maximum VM-instruction error number. */
108#define HMVMX_INSTR_ERROR_MAX 28
109
110/** Profiling macro. */
111#ifdef HM_PROFILE_EXIT_DISPATCH
112# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
113# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
114#else
115# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
116# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
117#endif
118
119#ifndef IN_NEM_DARWIN
120/** Assert that preemption is disabled or covered by thread-context hooks. */
121# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
122 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
123
124/** Assert that we haven't migrated CPUs when thread-context hooks are not
125 * used. */
126# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
127 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
128 ("Illegal migration! Entered on CPU %u Current %u\n", \
129 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
130#else
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
132# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
133#endif
134
135/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
136 * context. */
137#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
138 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
139 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
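/* Editor's illustrative note (not part of the original file): the "Mbz" bits name
 * CPUMCTX_EXTRN_XXX flags that must be zero in fExtrn, i.e. state that must already
 * have been imported from the VMCS. For example, code about to read the guest CR0
 * from the context would first do:
 *
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
 */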
140
141/** Log the VM-exit reason with an easily visible marker to identify it in a
142 * potential sea of logging data. */
143#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
144 do { \
145 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
146 HMGetVmxExitName(a_uExitReason))); \
147 } while (0)
148
149
150/*********************************************************************************************************************************
151* Structures and Typedefs *
152*********************************************************************************************************************************/
153/**
154 * Memory operand read or write access.
155 */
156typedef enum VMXMEMACCESS
157{
158 VMXMEMACCESS_READ = 0,
159 VMXMEMACCESS_WRITE = 1
160} VMXMEMACCESS;
161
162
163/**
164 * VMX VM-exit handler.
165 *
166 * @returns Strict VBox status code (i.e. informational status codes too).
167 * @param pVCpu The cross context virtual CPU structure.
168 * @param pVmxTransient The VMX-transient structure.
169 */
170#ifndef HMVMX_USE_FUNCTION_TABLE
171typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
172#else
173typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
174/** Pointer to VM-exit handler. */
175typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
176#endif
177
178/**
179 * VMX VM-exit handler, non-strict status code.
180 *
181 * This is generally the same as FNVMXEXITHANDLER; the NSRC part of the name is just FYI.
182 *
183 * @returns VBox status code, no informational status code returned.
184 * @param pVCpu The cross context virtual CPU structure.
185 * @param pVmxTransient The VMX-transient structure.
186 *
187 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
188 * use of that status code will be replaced with VINF_EM_SOMETHING
189 * later when switching over to IEM.
190 */
191#ifndef HMVMX_USE_FUNCTION_TABLE
192typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
193#else
194typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
195#endif
196
197
198/*********************************************************************************************************************************
199* Internal Functions *
200*********************************************************************************************************************************/
201#ifndef HMVMX_USE_FUNCTION_TABLE
202DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
204# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
205#else
206# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
207# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
208#endif
209#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
210DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
211#endif
212
213static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
214
215/** @name VM-exit handler prototypes.
216 * @{
217 */
218static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
219static FNVMXEXITHANDLER vmxHCExitExtInt;
220static FNVMXEXITHANDLER vmxHCExitTripleFault;
221static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
222static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
223static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
224static FNVMXEXITHANDLER vmxHCExitCpuid;
225static FNVMXEXITHANDLER vmxHCExitGetsec;
226static FNVMXEXITHANDLER vmxHCExitHlt;
227static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
228static FNVMXEXITHANDLER vmxHCExitInvlpg;
229static FNVMXEXITHANDLER vmxHCExitRdpmc;
230static FNVMXEXITHANDLER vmxHCExitVmcall;
231#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
232static FNVMXEXITHANDLER vmxHCExitVmclear;
233static FNVMXEXITHANDLER vmxHCExitVmlaunch;
234static FNVMXEXITHANDLER vmxHCExitVmptrld;
235static FNVMXEXITHANDLER vmxHCExitVmptrst;
236static FNVMXEXITHANDLER vmxHCExitVmread;
237static FNVMXEXITHANDLER vmxHCExitVmresume;
238static FNVMXEXITHANDLER vmxHCExitVmwrite;
239static FNVMXEXITHANDLER vmxHCExitVmxoff;
240static FNVMXEXITHANDLER vmxHCExitVmxon;
241static FNVMXEXITHANDLER vmxHCExitInvvpid;
242# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
243static FNVMXEXITHANDLER vmxHCExitInvept;
244# endif
245#endif
246static FNVMXEXITHANDLER vmxHCExitRdtsc;
247static FNVMXEXITHANDLER vmxHCExitMovCRx;
248static FNVMXEXITHANDLER vmxHCExitMovDRx;
249static FNVMXEXITHANDLER vmxHCExitIoInstr;
250static FNVMXEXITHANDLER vmxHCExitRdmsr;
251static FNVMXEXITHANDLER vmxHCExitWrmsr;
252static FNVMXEXITHANDLER vmxHCExitMwait;
253static FNVMXEXITHANDLER vmxHCExitMtf;
254static FNVMXEXITHANDLER vmxHCExitMonitor;
255static FNVMXEXITHANDLER vmxHCExitPause;
256static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
257static FNVMXEXITHANDLER vmxHCExitApicAccess;
258static FNVMXEXITHANDLER vmxHCExitEptViolation;
259static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
260static FNVMXEXITHANDLER vmxHCExitRdtscp;
261static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
262static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
263static FNVMXEXITHANDLER vmxHCExitXsetbv;
264static FNVMXEXITHANDLER vmxHCExitInvpcid;
265#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
266static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
267#endif
268static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
269static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
270/** @} */
271
272#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
273/** @name Nested-guest VM-exit handler prototypes.
274 * @{
275 */
276static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
277static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
278static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
279static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
280static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
281static FNVMXEXITHANDLER vmxHCExitHltNested;
282static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
283static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
284static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
285static FNVMXEXITHANDLER vmxHCExitRdtscNested;
286static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
287static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
288static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
289static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
290static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
291static FNVMXEXITHANDLER vmxHCExitMwaitNested;
292static FNVMXEXITHANDLER vmxHCExitMtfNested;
293static FNVMXEXITHANDLER vmxHCExitMonitorNested;
294static FNVMXEXITHANDLER vmxHCExitPauseNested;
295static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
296static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
297static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
298static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
299static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
301static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
302static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
303static FNVMXEXITHANDLER vmxHCExitInstrNested;
304static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
305# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
306static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
307static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
308# endif
309/** @} */
310#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
311
312
313/*********************************************************************************************************************************
314* Global Variables *
315*********************************************************************************************************************************/
316#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
317/**
318 * Array of all VMCS fields.
319 * Any fields added to the VT-x spec. should be added here.
320 *
321 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
322 * of nested-guests.
323 */
324static const uint32_t g_aVmcsFields[] =
325{
326 /* 16-bit control fields. */
327 VMX_VMCS16_VPID,
328 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
329 VMX_VMCS16_EPTP_INDEX,
330
331 /* 16-bit guest-state fields. */
332 VMX_VMCS16_GUEST_ES_SEL,
333 VMX_VMCS16_GUEST_CS_SEL,
334 VMX_VMCS16_GUEST_SS_SEL,
335 VMX_VMCS16_GUEST_DS_SEL,
336 VMX_VMCS16_GUEST_FS_SEL,
337 VMX_VMCS16_GUEST_GS_SEL,
338 VMX_VMCS16_GUEST_LDTR_SEL,
339 VMX_VMCS16_GUEST_TR_SEL,
340 VMX_VMCS16_GUEST_INTR_STATUS,
341 VMX_VMCS16_GUEST_PML_INDEX,
342
343 /* 16-bit host-state fields. */
344 VMX_VMCS16_HOST_ES_SEL,
345 VMX_VMCS16_HOST_CS_SEL,
346 VMX_VMCS16_HOST_SS_SEL,
347 VMX_VMCS16_HOST_DS_SEL,
348 VMX_VMCS16_HOST_FS_SEL,
349 VMX_VMCS16_HOST_GS_SEL,
350 VMX_VMCS16_HOST_TR_SEL,
351
352 /* 64-bit control fields. */
353 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
354 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
355 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
356 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
357 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
358 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
359 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
360 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
361 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
362 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
363 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
364 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
365 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
366 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
367 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
368 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
369 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
370 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
371 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
372 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
373 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
374 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
375 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
376 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
377 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
378 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
379 VMX_VMCS64_CTRL_EPTP_FULL,
380 VMX_VMCS64_CTRL_EPTP_HIGH,
381 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
382 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
383 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
384 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
385 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
386 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
387 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
388 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
389 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
390 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
391 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
392 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
393 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
394 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
395 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
396 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
397 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
398 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
399 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
400 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
401 VMX_VMCS64_CTRL_SPPTP_FULL,
402 VMX_VMCS64_CTRL_SPPTP_HIGH,
403 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
404 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
405 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
406 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
407 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
408 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
409
410 /* 64-bit read-only data fields. */
411 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
412 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
413
414 /* 64-bit guest-state fields. */
415 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
416 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
417 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
418 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
419 VMX_VMCS64_GUEST_PAT_FULL,
420 VMX_VMCS64_GUEST_PAT_HIGH,
421 VMX_VMCS64_GUEST_EFER_FULL,
422 VMX_VMCS64_GUEST_EFER_HIGH,
423 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
424 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
425 VMX_VMCS64_GUEST_PDPTE0_FULL,
426 VMX_VMCS64_GUEST_PDPTE0_HIGH,
427 VMX_VMCS64_GUEST_PDPTE1_FULL,
428 VMX_VMCS64_GUEST_PDPTE1_HIGH,
429 VMX_VMCS64_GUEST_PDPTE2_FULL,
430 VMX_VMCS64_GUEST_PDPTE2_HIGH,
431 VMX_VMCS64_GUEST_PDPTE3_FULL,
432 VMX_VMCS64_GUEST_PDPTE3_HIGH,
433 VMX_VMCS64_GUEST_BNDCFGS_FULL,
434 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
435 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
436 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
437 VMX_VMCS64_GUEST_PKRS_FULL,
438 VMX_VMCS64_GUEST_PKRS_HIGH,
439
440 /* 64-bit host-state fields. */
441 VMX_VMCS64_HOST_PAT_FULL,
442 VMX_VMCS64_HOST_PAT_HIGH,
443 VMX_VMCS64_HOST_EFER_FULL,
444 VMX_VMCS64_HOST_EFER_HIGH,
445 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
446 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
447 VMX_VMCS64_HOST_PKRS_FULL,
448 VMX_VMCS64_HOST_PKRS_HIGH,
449
450 /* 32-bit control fields. */
451 VMX_VMCS32_CTRL_PIN_EXEC,
452 VMX_VMCS32_CTRL_PROC_EXEC,
453 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
454 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
455 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
456 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
457 VMX_VMCS32_CTRL_EXIT,
458 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
459 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
460 VMX_VMCS32_CTRL_ENTRY,
461 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
462 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
463 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
464 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
465 VMX_VMCS32_CTRL_TPR_THRESHOLD,
466 VMX_VMCS32_CTRL_PROC_EXEC2,
467 VMX_VMCS32_CTRL_PLE_GAP,
468 VMX_VMCS32_CTRL_PLE_WINDOW,
469
470 /* 32-bit read-only data fields. */
471 VMX_VMCS32_RO_VM_INSTR_ERROR,
472 VMX_VMCS32_RO_EXIT_REASON,
473 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
474 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
475 VMX_VMCS32_RO_IDT_VECTORING_INFO,
476 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
477 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
478 VMX_VMCS32_RO_EXIT_INSTR_INFO,
479
480 /* 32-bit guest-state fields. */
481 VMX_VMCS32_GUEST_ES_LIMIT,
482 VMX_VMCS32_GUEST_CS_LIMIT,
483 VMX_VMCS32_GUEST_SS_LIMIT,
484 VMX_VMCS32_GUEST_DS_LIMIT,
485 VMX_VMCS32_GUEST_FS_LIMIT,
486 VMX_VMCS32_GUEST_GS_LIMIT,
487 VMX_VMCS32_GUEST_LDTR_LIMIT,
488 VMX_VMCS32_GUEST_TR_LIMIT,
489 VMX_VMCS32_GUEST_GDTR_LIMIT,
490 VMX_VMCS32_GUEST_IDTR_LIMIT,
491 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
492 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
493 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
494 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
495 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
496 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
497 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
498 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
499 VMX_VMCS32_GUEST_INT_STATE,
500 VMX_VMCS32_GUEST_ACTIVITY_STATE,
501 VMX_VMCS32_GUEST_SMBASE,
502 VMX_VMCS32_GUEST_SYSENTER_CS,
503 VMX_VMCS32_PREEMPT_TIMER_VALUE,
504
505 /* 32-bit host-state fields. */
506 VMX_VMCS32_HOST_SYSENTER_CS,
507
508 /* Natural-width control fields. */
509 VMX_VMCS_CTRL_CR0_MASK,
510 VMX_VMCS_CTRL_CR4_MASK,
511 VMX_VMCS_CTRL_CR0_READ_SHADOW,
512 VMX_VMCS_CTRL_CR4_READ_SHADOW,
513 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
514 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
515 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
516 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
517
518 /* Natural-width read-only data fields. */
519 VMX_VMCS_RO_EXIT_QUALIFICATION,
520 VMX_VMCS_RO_IO_RCX,
521 VMX_VMCS_RO_IO_RSI,
522 VMX_VMCS_RO_IO_RDI,
523 VMX_VMCS_RO_IO_RIP,
524 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
525
526 /* Natural-width guest-state fields. */
527 VMX_VMCS_GUEST_CR0,
528 VMX_VMCS_GUEST_CR3,
529 VMX_VMCS_GUEST_CR4,
530 VMX_VMCS_GUEST_ES_BASE,
531 VMX_VMCS_GUEST_CS_BASE,
532 VMX_VMCS_GUEST_SS_BASE,
533 VMX_VMCS_GUEST_DS_BASE,
534 VMX_VMCS_GUEST_FS_BASE,
535 VMX_VMCS_GUEST_GS_BASE,
536 VMX_VMCS_GUEST_LDTR_BASE,
537 VMX_VMCS_GUEST_TR_BASE,
538 VMX_VMCS_GUEST_GDTR_BASE,
539 VMX_VMCS_GUEST_IDTR_BASE,
540 VMX_VMCS_GUEST_DR7,
541 VMX_VMCS_GUEST_RSP,
542 VMX_VMCS_GUEST_RIP,
543 VMX_VMCS_GUEST_RFLAGS,
544 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
545 VMX_VMCS_GUEST_SYSENTER_ESP,
546 VMX_VMCS_GUEST_SYSENTER_EIP,
547 VMX_VMCS_GUEST_S_CET,
548 VMX_VMCS_GUEST_SSP,
549 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
550
551 /* Natural-width host-state fields */
552 VMX_VMCS_HOST_CR0,
553 VMX_VMCS_HOST_CR3,
554 VMX_VMCS_HOST_CR4,
555 VMX_VMCS_HOST_FS_BASE,
556 VMX_VMCS_HOST_GS_BASE,
557 VMX_VMCS_HOST_TR_BASE,
558 VMX_VMCS_HOST_GDTR_BASE,
559 VMX_VMCS_HOST_IDTR_BASE,
560 VMX_VMCS_HOST_SYSENTER_ESP,
561 VMX_VMCS_HOST_SYSENTER_EIP,
562 VMX_VMCS_HOST_RSP,
563 VMX_VMCS_HOST_RIP,
564 VMX_VMCS_HOST_S_CET,
565 VMX_VMCS_HOST_SSP,
566 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
567};
568#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
569
570#ifdef HMVMX_USE_FUNCTION_TABLE
571/**
572 * VMX_EXIT dispatch table.
573 */
574static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
575{
576 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
577 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
578 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
579 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
580 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
581 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
582 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
583 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
584 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
585 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
586 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
587 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
588 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
589 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
590 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
591 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
592 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
593 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
594 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
595#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
596 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
597 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
598 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
599 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
600 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
601 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
602 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
603 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
604 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
605#else
606 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
607 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
608 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
609 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
610 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
611 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
612 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
613 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
614 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
615#endif
616 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
617 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
618 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
619 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
620 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
621 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
622 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
623 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
624 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
625 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
626 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
627 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
628 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
629 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
630 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
631 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
632 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
633 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
634 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
635 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
636 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
637 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
638#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
639 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
640#else
641 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
642#endif
643 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
644 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
645#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
646 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
647#else
648 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
649#endif
650 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
651 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
652 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
653 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
654 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
655 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
656 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
657 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
658 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
659 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
660 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
661 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
662 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
663 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
664 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
665 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
666};
667#endif /* HMVMX_USE_FUNCTION_TABLE */
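/* Editor's illustrative note (not part of the original file; the uExitReason field is
 * assumed from the VMXTRANSIENT structure, which is defined elsewhere): with
 * HMVMX_USE_FUNCTION_TABLE defined, the common VM-exit path dispatches roughly as
 *
 *     Assert(pVmxTransient->uExitReason <= VMX_EXIT_MAX);
 *     VBOXSTRICTRC rcStrict = g_aVMExitHandlers[pVmxTransient->uExitReason].pfn(pVCpu, pVmxTransient);
 *
 * i.e. the exit reason indexes the table and unexpected/undefined reasons land in
 * vmxHCExitErrUnexpected. */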
668
669#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
670static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
671{
672 /* 0 */ "(Not Used)",
673 /* 1 */ "VMCALL executed in VMX root operation.",
674 /* 2 */ "VMCLEAR with invalid physical address.",
675 /* 3 */ "VMCLEAR with VMXON pointer.",
676 /* 4 */ "VMLAUNCH with non-clear VMCS.",
677 /* 5 */ "VMRESUME with non-launched VMCS.",
678 /* 6 */ "VMRESUME after VMXOFF.",
679 /* 7 */ "VM-entry with invalid control fields.",
680 /* 8 */ "VM-entry with invalid host state fields.",
681 /* 9 */ "VMPTRLD with invalid physical address.",
682 /* 10 */ "VMPTRLD with VMXON pointer.",
683 /* 11 */ "VMPTRLD with incorrect revision identifier.",
684 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
685 /* 13 */ "VMWRITE to read-only VMCS component.",
686 /* 14 */ "(Not Used)",
687 /* 15 */ "VMXON executed in VMX root operation.",
688 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
689 /* 17 */ "VM-entry with non-launched executive VMCS.",
690 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
691 /* 19 */ "VMCALL with non-clear VMCS.",
692 /* 20 */ "VMCALL with invalid VM-exit control fields.",
693 /* 21 */ "(Not Used)",
694 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
695 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
696 /* 24 */ "VMCALL with invalid SMM-monitor features.",
697 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
698 /* 26 */ "VM-entry with events blocked by MOV SS.",
699 /* 27 */ "(Not Used)",
700 /* 28 */ "Invalid operand to INVEPT/INVVPID."
701};
702#endif /* VBOX_STRICT && LOG_ENABLED */
703
704
705/**
706 * Gets the CR0 guest/host mask.
707 *
708 * These bits typically do not change throughout the lifetime of a VM. Any bit set in
709 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
710 * by the guest.
711 *
712 * @returns The CR0 guest/host mask.
713 * @param pVCpu The cross context virtual CPU structure.
714 */
715static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
716{
717 /*
718 * Guest modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW) and
719 * to CR0 bits that we require for shadow paging (PG) must cause VM-exits.
720 *
721 * Furthermore, modifications to any bits that are reserved/unspecified currently
722 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
723 * when future CPUs specify and use currently reserved/unspecified bits.
724 */
725 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
726 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
727 * and @bugref{6944}. */
728 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
729 return ( X86_CR0_PE
730 | X86_CR0_NE
731 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
732 | X86_CR0_PG
733 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
734}
735
736
737/**
738 * Gets the CR4 guest/host mask.
739 *
740 * These bits typically do not change throughout the lifetime of a VM. Any bit set in
741 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
742 * by the guest.
743 *
744 * @returns The CR4 guest/host mask.
745 * @param pVCpu The cross context virtual CPU structure.
746 */
747static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
748{
749 /*
750 * We construct a mask of all CR4 bits that the guest can modify without causing
751 * a VM-exit, then invert this mask to obtain all CR4 bits that should cause
752 * a VM-exit when the guest attempts to modify them while executing using
753 * hardware-assisted VMX.
754 *
755 * When a feature is not exposed to the guest (and may be present on the host),
756 * we want to intercept guest modifications to the bit so we can emulate proper
757 * behavior (e.g., #GP).
758 *
759 * Furthermore, only modifications to those bits that don't require immediate
760 * emulation are allowed. For example, PCIDE is excluded because its behavior
761 * depends on CR3, which might not always be the guest value while executing
762 * using hardware-assisted VMX.
763 */
764 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
765 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
766#ifdef IN_NEM_DARWIN
767 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
768#endif
769 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
770
771 /*
772 * Paranoia.
773 * Ensure features exposed to the guest are present on the host.
774 */
775 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
776#ifdef IN_NEM_DARWIN
777 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
778#endif
779 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
780
781 uint64_t const fGstMask = X86_CR4_PVI
782 | X86_CR4_TSD
783 | X86_CR4_DE
784 | X86_CR4_MCE
785 | X86_CR4_PCE
786 | X86_CR4_OSXMMEEXCPT
787 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
788#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
789 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
790 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
791#endif
792 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
793 return ~fGstMask;
794}
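/* Editor's illustrative note (not part of the original file; the VMX_VMCS_WRITE_NW
 * usage below is an assumption about how the mask is consumed): the returned value
 * would typically be committed to the CR4 guest/host mask field, e.g.
 *
 *     uint64_t const fCr4Mask = vmxHCGetFixedCr4Mask(pVCpu);
 *     int const rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, fCr4Mask);
 *     AssertRC(rc);
 *
 * after which any guest write that changes a masked CR4 bit causes a MOV CRx VM-exit. */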
795
796
797/**
798 * Adds one or more exceptions to the exception bitmap and commits it to the current
799 * VMCS.
800 *
801 * @param pVCpu The cross context virtual CPU structure.
802 * @param pVmxTransient The VMX-transient structure.
803 * @param uXcptMask The exception(s) to add.
804 */
805static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
806{
807 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
808 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
809 if ((uXcptBitmap & uXcptMask) != uXcptMask)
810 {
811 uXcptBitmap |= uXcptMask;
812 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
813 AssertRC(rc);
814 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
815 }
816}
817
818
819/**
820 * Adds an exception to the exception bitmap and commits it to the current VMCS.
821 *
822 * @param pVCpu The cross context virtual CPU structure.
823 * @param pVmxTransient The VMX-transient structure.
824 * @param uXcpt The exception to add.
825 */
826static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
827{
828 Assert(uXcpt <= X86_XCPT_LAST);
829 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
830}
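/* Editor's illustrative note (not part of the original file): to start intercepting,
 * say, #GP for the current VMCS a caller would simply do
 *
 *     vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 *
 * which ORs RT_BIT_32(X86_XCPT_GP) into the cached exception bitmap and only performs
 * the VMX_VMCS_WRITE_32 commit when the bit was not already set. */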
831
832
833/**
834 * Remove one or more exceptions from the exception bitmap and commits it to the
835 * current VMCS.
836 *
837 * This takes care of not removing the exception intercept if a nested-guest
838 * requires the exception to be intercepted.
839 *
840 * @returns VBox status code.
841 * @param pVCpu The cross context virtual CPU structure.
842 * @param pVmxTransient The VMX-transient structure.
843 * @param uXcptMask The exception(s) to remove.
844 */
845static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
846{
847 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
848 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
849 if (u32XcptBitmap & uXcptMask)
850 {
851#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
852 if (!pVmxTransient->fIsNestedGuest)
853 { /* likely */ }
854 else
855 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
856#endif
857#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
858 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
859 | RT_BIT(X86_XCPT_DE)
860 | RT_BIT(X86_XCPT_NM)
861 | RT_BIT(X86_XCPT_TS)
862 | RT_BIT(X86_XCPT_UD)
863 | RT_BIT(X86_XCPT_NP)
864 | RT_BIT(X86_XCPT_SS)
865 | RT_BIT(X86_XCPT_GP)
866 | RT_BIT(X86_XCPT_PF)
867 | RT_BIT(X86_XCPT_MF));
868#elif defined(HMVMX_ALWAYS_TRAP_PF)
869 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
870#endif
871 if (uXcptMask)
872 {
873 /* Validate we are not removing any essential exception intercepts. */
874#ifndef IN_NEM_DARWIN
875 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
876#else
877 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
878#endif
879 NOREF(pVCpu);
880 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
881 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
882
883 /* Remove it from the exception bitmap. */
884 u32XcptBitmap &= ~uXcptMask;
885
886 /* Commit and update the cache if necessary. */
887 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
888 {
889 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
890 AssertRC(rc);
891 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
892 }
893 }
894 }
895 return VINF_SUCCESS;
896}
897
898
899/**
900 * Removes an exception from the exception bitmap and commits it to the current
901 * VMCS.
902 *
903 * @returns VBox status code.
904 * @param pVCpu The cross context virtual CPU structure.
905 * @param pVmxTransient The VMX-transient structure.
906 * @param uXcpt The exception to remove.
907 */
908static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
909{
910 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
911}
912
913#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
914
915/**
916 * Loads the shadow VMCS specified by the VMCS info. object.
917 *
918 * @returns VBox status code.
919 * @param pVmcsInfo The VMCS info. object.
920 *
921 * @remarks Can be called with interrupts disabled.
922 */
923static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
924{
925 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
926 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
927
928 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
929 if (RT_SUCCESS(rc))
930 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
931 return rc;
932}
933
934
935/**
936 * Clears the shadow VMCS specified by the VMCS info. object.
937 *
938 * @returns VBox status code.
939 * @param pVmcsInfo The VMCS info. object.
940 *
941 * @remarks Can be called with interrupts disabled.
942 */
943static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
944{
945 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
946 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
947
948 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
949 if (RT_SUCCESS(rc))
950 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
951 return rc;
952}
953
954
955/**
956 * Switches from and to the specified VMCSes.
957 *
958 * @returns VBox status code.
959 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
960 * @param pVmcsInfoTo The VMCS info. object we are switching to.
961 *
962 * @remarks Called with interrupts disabled.
963 */
964static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
965{
966 /*
967 * Clear the VMCS we are switching out if it has not already been cleared.
968 * This will sync any CPU internal data back to the VMCS.
969 */
970 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
971 {
972 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
973 if (RT_SUCCESS(rc))
974 {
975 /*
976 * The shadow VMCS, if any, would not be active at this point since we
977 * would have cleared it while importing the virtual hardware-virtualization
978 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
979 * clear the shadow VMCS here, just assert for safety.
980 */
981 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
982 }
983 else
984 return rc;
985 }
986
987 /*
988 * Clear the VMCS we are switching to if it has not already been cleared.
989 * This will initialize the VMCS launch state to "clear" required for loading it.
990 *
991 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
992 */
993 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
994 {
995 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
996 if (RT_SUCCESS(rc))
997 { /* likely */ }
998 else
999 return rc;
1000 }
1001
1002 /*
1003 * Finally, load the VMCS we are switching to.
1004 */
1005 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1006}
1007
1008
1009/**
1010 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1011 * caller.
1012 *
1013 * @returns VBox status code.
1014 * @param pVCpu The cross context virtual CPU structure.
1015 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1016 * true) or guest VMCS (pass false).
1017 */
1018static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1019{
1020 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1021 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1022
1023 PVMXVMCSINFO pVmcsInfoFrom;
1024 PVMXVMCSINFO pVmcsInfoTo;
1025 if (fSwitchToNstGstVmcs)
1026 {
1027 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1028 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1029 }
1030 else
1031 {
1032 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1033 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1034 }
1035
1036 /*
1037 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1038 * preemption hook code path acquires the current VMCS.
1039 */
1040 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1041
1042 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1043 if (RT_SUCCESS(rc))
1044 {
1045 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1046 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1047
1048 /*
1049 * If we are switching to a VMCS that was executed on a different host CPU or was
1050 * never executed before, flag that we need to export the host state before executing
1051 * guest/nested-guest code using hardware-assisted VMX.
1052 *
1053 * This could probably be done in a preemptible context since the preemption hook
1054 * will flag the necessary change in host context. However, since preemption is
1055 * already disabled and to avoid making assumptions about host specific code in
1056 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1057 * disabled.
1058 */
1059 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1060 { /* likely */ }
1061 else
1062 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1063
1064 ASMSetFlags(fEFlags);
1065
1066 /*
1067 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1068 * flag that we need to update the host MSR values there. Even if we decide in the
1069 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1070 * if its content differs, we would have to update the host MSRs anyway.
1071 */
1072 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1073 }
1074 else
1075 ASMSetFlags(fEFlags);
1076 return rc;
1077}
1078
1079#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1080#ifdef VBOX_STRICT
1081
1082/**
1083 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1084 * transient structure.
1085 *
1086 * @param pVCpu The cross context virtual CPU structure.
1087 * @param pVmxTransient The VMX-transient structure.
1088 */
1089DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1090{
1091 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1092 AssertRC(rc);
1093}
1094
1095
1096/**
1097 * Reads the VM-entry exception error code field from the VMCS into
1098 * the VMX transient structure.
1099 *
1100 * @param pVCpu The cross context virtual CPU structure.
1101 * @param pVmxTransient The VMX-transient structure.
1102 */
1103DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1104{
1105 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1106 AssertRC(rc);
1107}
1108
1109
1110/**
1111 * Reads the VM-entry instruction length field from the VMCS into
1112 * the VMX transient structure.
1113 *
1114 * @param pVCpu The cross context virtual CPU structure.
1115 * @param pVmxTransient The VMX-transient structure.
1116 */
1117DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1118{
1119 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1120 AssertRC(rc);
1121}
1122
1123#endif /* VBOX_STRICT */
1124
1125
1126/**
1127 * Reads VMCS fields into the VMXTRANSIENT structure, slow path version.
1128 *
1129 * Don't call this directly unless it's likely that some or all of the fields
1130 * given in @a a_fReadMask have already been read.
1131 *
1132 * @tparam a_fReadMask The fields to read.
1133 * @param pVCpu The cross context virtual CPU structure.
1134 * @param pVmxTransient The VMX-transient structure.
1135 */
1136template<uint32_t const a_fReadMask>
1137static void vmxHCReadToTransientSlow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1138{
1139 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1140 | HMVMX_READ_EXIT_INSTR_LEN
1141 | HMVMX_READ_EXIT_INSTR_INFO
1142 | HMVMX_READ_IDT_VECTORING_INFO
1143 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1144 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1145 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1146 | HMVMX_READ_GUEST_LINEAR_ADDR
1147 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1148 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1149 )) == 0);
1150
1151 if ((pVmxTransient->fVmcsFieldsRead & a_fReadMask) != a_fReadMask)
1152 {
1153 uint32_t const fVmcsFieldsRead = pVmxTransient->fVmcsFieldsRead;
1154
1155 if ( (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1156 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1157 {
1158 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1159 AssertRC(rc);
1160 }
1161 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1162 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1163 {
1164 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1165 AssertRC(rc);
1166 }
1167 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1168 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1169 {
1170 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1171 AssertRC(rc);
1172 }
1173 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1174 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1175 {
1176 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1177 AssertRC(rc);
1178 }
1179 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1180 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1181 {
1182 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1183 AssertRC(rc);
1184 }
1185 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1186 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1187 {
1188 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1189 AssertRC(rc);
1190 }
1191 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1192 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1193 {
1194 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1195 AssertRC(rc);
1196 }
1197 if ( (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1198 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1199 {
1200 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1201 AssertRC(rc);
1202 }
1203 if ( (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1204 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1205 {
1206 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1207 AssertRC(rc);
1208 }
1209 if ( (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1210 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1211 {
1212 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1213 AssertRC(rc);
1214 }
1215
1216 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1217 }
1218}
1219
1220
1221/**
1222 * Reads VMCS fields into the VMXTRANSIENT structure.
1223 *
1224 * This optimizes for the case where none of the fields in @a a_fReadMask have been
1225 * read yet, generating an optimized read sequence without any conditionals in
1226 * non-strict builds.
1227 *
1228 * @tparam a_fReadMask The fields to read. One or more of the
1229 * HMVMX_READ_XXX fields ORed together.
1230 * @param pVCpu The cross context virtual CPU structure.
1231 * @param pVmxTransient The VMX-transient structure.
1232 */
1233template<uint32_t const a_fReadMask>
1234DECLINLINE(void) vmxHCReadToTransient(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1235{
1236 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1237 | HMVMX_READ_EXIT_INSTR_LEN
1238 | HMVMX_READ_EXIT_INSTR_INFO
1239 | HMVMX_READ_IDT_VECTORING_INFO
1240 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1241 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1242 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1243 | HMVMX_READ_GUEST_LINEAR_ADDR
1244 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1245 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1246 )) == 0);
1247
1248 if (RT_LIKELY(!(pVmxTransient->fVmcsFieldsRead & a_fReadMask)))
1249 {
1250 if (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1251 {
1252 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1253 AssertRC(rc);
1254 }
1255 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1256 {
1257 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1258 AssertRC(rc);
1259 }
1260 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1261 {
1262 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1263 AssertRC(rc);
1264 }
1265 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1266 {
1267 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1268 AssertRC(rc);
1269 }
1270 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1271 {
1272 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1273 AssertRC(rc);
1274 }
1275 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1276 {
1277 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1278 AssertRC(rc);
1279 }
1280 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1281 {
1282 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1283 AssertRC(rc);
1284 }
1285 if (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1286 {
1287 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1288 AssertRC(rc);
1289 }
1290 if (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1291 {
1292 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1293 AssertRC(rc);
1294 }
1295 if (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1296 {
1297 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1298 AssertRC(rc);
1299 }
1300
1301 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1302 }
1303 else
1304 {
1305 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatReadToTransientFallback);
1306 Log11Func(("a_fReadMask=%#x fVmcsFieldsRead=%#x => %#x - Taking inefficient code path!\n",
1307 a_fReadMask, pVmxTransient->fVmcsFieldsRead, a_fReadMask & pVmxTransient->fVmcsFieldsRead));
1308 vmxHCReadToTransientSlow<a_fReadMask>(pVCpu, pVmxTransient);
1309 }
1310}
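/* Editor's illustrative note (not part of the original file): a typical exit handler,
 * e.g. one for I/O instructions, would do a single condensed read up front:
 *
 *     vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
 *                          | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
 *
 * In non-strict builds this expands to straight-line VMREADs for exactly those fields;
 * if some of them were already read, the call falls back to vmxHCReadToTransientSlow
 * and bumps StatReadToTransientFallback. */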
1311
1312
1313#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1314/**
1315 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1316 *
1317 * @param pVCpu The cross context virtual CPU structure.
1318 * @param pVmxTransient The VMX-transient structure.
1319 */
1320static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1321{
1322 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1323 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1324 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1325 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1326 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1327 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1328 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1329 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1330 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1331 AssertRC(rc);
1332 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1333 | HMVMX_READ_EXIT_INSTR_LEN
1334 | HMVMX_READ_EXIT_INSTR_INFO
1335 | HMVMX_READ_IDT_VECTORING_INFO
1336 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1337 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1338 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1339 | HMVMX_READ_GUEST_LINEAR_ADDR
1340 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1341}
1342#endif
1343
1344/**
1345 * Verifies that our cached values of the VMCS fields are all consistent with
1346 * what's actually present in the VMCS.
1347 *
1348 * @returns VBox status code.
1349 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1350 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1351 * VMCS content. HMCPU error-field is
1352 * updated, see VMX_VCI_XXX.
1353 * @param pVCpu The cross context virtual CPU structure.
1354 * @param pVmcsInfo The VMCS info. object.
1355 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1356 */
1357static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1358{
1359 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1360
1361 uint32_t u32Val;
1362 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1363 AssertRC(rc);
1364 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1365 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1366 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1367 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1368
1369 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1370 AssertRC(rc);
1371 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1372 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1373 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1374 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1375
1376 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1377 AssertRC(rc);
1378 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1379 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1380 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1381 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1382
1383 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1384 AssertRC(rc);
1385 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1386 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1387 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1388 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1389
1390 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1391 {
1392 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1393 AssertRC(rc);
1394 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1395 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1396 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1397 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1398 }
1399
1400 uint64_t u64Val;
1401 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1402 {
1403 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1404 AssertRC(rc);
1405 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1406 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1407 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1408 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1409 }
1410
1411 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1412 AssertRC(rc);
1413 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1414 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1415 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1416 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1417
1418 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1419 AssertRC(rc);
1420 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1421 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1422 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1423 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1424
1425 NOREF(pcszVmcs);
1426 return VINF_SUCCESS;
1427}
1428
1429
1430/**
1431 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1432 * VMCS.
1433 *
1434 * This is typically required when the guest changes paging mode.
1435 *
1436 * @returns VBox status code.
1437 * @param pVCpu The cross context virtual CPU structure.
1438 * @param pVmxTransient The VMX-transient structure.
1439 *
1440 * @remarks Requires EFER.
1441 * @remarks No-long-jump zone!!!
1442 */
1443static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1444{
1445 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1446 {
1447 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1448 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1449
1450 /*
1451 * VM-entry controls.
1452 */
1453 {
1454 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1455 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1456
1457 /*
1458 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1459 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1460 *
1461 * For nested-guests, this is a mandatory VM-entry control. It's also
1462 * required because we do not want to leak host bits to the nested-guest.
1463 */
1464 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1465
1466 /*
1467 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1468 *
1469             * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1470             * required to get the nested-guest working with hardware-assisted VMX execution.
1471 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1472 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1473 * here rather than while merging the guest VMCS controls.
1474 */
1475 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1476 {
1477 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1478 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1479 }
1480 else
1481 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1482
1483 /*
1484 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1485 *
1486 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1487 * regardless of whether the nested-guest VMCS specifies it because we are free to
1488 * load whatever MSRs we require and we do not need to modify the guest visible copy
1489 * of the VM-entry MSR load area.
1490 */
1491 if ( g_fHmVmxSupportsVmcsEfer
1492#ifndef IN_NEM_DARWIN
1493 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1494#endif
1495 )
1496 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1497 else
1498 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1499
1500 /*
1501 * The following should -not- be set (since we're not in SMM mode):
1502 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1503 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1504 */
1505
1506 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1507 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1508
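            /* Illustrative note (hypothetical MSR values): if allowed0 were 0x000011ff and allowed1
               were 0x0003ffff, then bits 0..8 and 12 must be 1 and bits 18..31 must be 0. The check
               below fails only when fVal has a bit set that is clear in fZap, i.e. we asked for a
               feature the CPU does not allow to be 1. */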
1509 if ((fVal & fZap) == fVal)
1510 { /* likely */ }
1511 else
1512 {
1513 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1514 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1515 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1516 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1517 }
1518
1519 /* Commit it to the VMCS. */
1520 if (pVmcsInfo->u32EntryCtls != fVal)
1521 {
1522 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1523 AssertRC(rc);
1524 pVmcsInfo->u32EntryCtls = fVal;
1525 }
1526 }
1527
1528 /*
1529 * VM-exit controls.
1530 */
1531 {
1532 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1533 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1534
1535 /*
1536 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1537 * supported the 1-setting of this bit.
1538 *
1539 * For nested-guests, we set the "save debug controls" as the converse
1540 * "load debug controls" is mandatory for nested-guests anyway.
1541 */
1542 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1543
1544 /*
1545 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1546 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1547 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1548 * vmxHCExportHostMsrs().
1549 *
1550 * For nested-guests, we always set this bit as we do not support 32-bit
1551 * hosts.
1552 */
1553 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1554
1555#ifndef IN_NEM_DARWIN
1556 /*
1557 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1558 *
1559 * For nested-guests, we should use the "save IA32_EFER" control if we also
1560 * used the "load IA32_EFER" control while exporting VM-entry controls.
1561 */
1562 if ( g_fHmVmxSupportsVmcsEfer
1563 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1564 {
1565 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1566 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1567 }
1568#endif
1569
1570 /*
1571 * Enable saving of the VMX-preemption timer value on VM-exit.
1572 * For nested-guests, currently not exposed/used.
1573 */
1574 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1575 * the timer value. */
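            /* (When this exit control is set the CPU stores the current, counted-down timer value
               back into the VMCS on VM-exit, so the countdown continues across exits without the
               VMM having to recompute and rewrite the field before every VM-entry.) */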
1576 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1577 {
1578 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1579 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1580 }
1581
1582 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1583 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1584
1585 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1586 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1587 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1588
1589 if ((fVal & fZap) == fVal)
1590 { /* likely */ }
1591 else
1592 {
1593 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1594 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1595 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1596 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1597 }
1598
1599 /* Commit it to the VMCS. */
1600 if (pVmcsInfo->u32ExitCtls != fVal)
1601 {
1602 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1603 AssertRC(rc);
1604 pVmcsInfo->u32ExitCtls = fVal;
1605 }
1606 }
1607
1608 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1609 }
1610 return VINF_SUCCESS;
1611}
1612
1613
1614/**
1615 * Sets the TPR threshold in the VMCS.
1616 *
1617 * @param pVCpu The cross context virtual CPU structure.
1618 * @param pVmcsInfo The VMCS info. object.
1619 * @param u32TprThreshold The TPR threshold (task-priority class only).
1620 */
1621DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1622{
1623 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1624 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1625 RT_NOREF(pVmcsInfo);
1626 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1627 AssertRC(rc);
1628}
1629
1630
1631/**
1632 * Exports the guest APIC TPR state into the VMCS.
1633 *
1634 * @param pVCpu The cross context virtual CPU structure.
1635 * @param pVmxTransient The VMX-transient structure.
1636 *
1637 * @remarks No-long-jump zone!!!
1638 */
1639static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1640{
1641 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1642 {
1643 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1644
1645 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1646 if (!pVmxTransient->fIsNestedGuest)
1647 {
1648 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1649 && APICIsEnabled(pVCpu))
1650 {
1651 /*
1652 * Setup TPR shadowing.
1653 */
1654 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1655 {
1656 bool fPendingIntr = false;
1657 uint8_t u8Tpr = 0;
1658 uint8_t u8PendingIntr = 0;
1659 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1660 AssertRC(rc);
1661
1662 /*
1663 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1664 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1665 * priority of the pending interrupt so we can deliver the interrupt. If there
1666 * are no interrupts pending, set threshold to 0 to not cause any
1667 * TPR-below-threshold VM-exits.
1668 */
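                    /* Worked example (illustrative values): with a pending vector of 0x53 (priority
                       class 5) and a guest TPR of 0x60 (class 6), the interrupt is masked; we set the
                       threshold to 5 so that a guest write lowering the TPR class below 5 causes a
                       TPR-below-threshold VM-exit, at which point the interrupt can be delivered. */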
1669 uint32_t u32TprThreshold = 0;
1670 if (fPendingIntr)
1671 {
1672 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1673 (which is the Task-Priority Class). */
1674 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1675 const uint8_t u8TprPriority = u8Tpr >> 4;
1676 if (u8PendingPriority <= u8TprPriority)
1677 u32TprThreshold = u8PendingPriority;
1678 }
1679
1680 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1681 }
1682 }
1683 }
1684 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1685 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1686 }
1687}
1688
1689
1690/**
1691 * Gets the guest interruptibility-state and updates related force-flags.
1692 *
1693 * @returns Guest's interruptibility-state.
1694 * @param pVCpu The cross context virtual CPU structure.
1695 *
1696 * @remarks No-long-jump zone!!!
1697 */
1698static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1699{
1700 /*
1701 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1702 */
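    /* For example, after the guest executes STI while RFLAGS.IF was previously clear, the
       instruction immediately following the STI must not be interrupted; exporting block-by-STI
       in the interruptibility-state prevents VT-x from delivering interrupts until after it. */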
1703 uint32_t fIntrState = 0;
1704 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1705 {
1706 /* If inhibition is active, RIP and RFLAGS should've been imported from the VMCS already. */
1707 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
1708
1709 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1710 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
1711 {
1712 if (pCtx->eflags.Bits.u1IF)
1713 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1714 else
1715 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1716 }
1717 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1718 {
1719 /*
1720             * We can clear the inhibit force flag here: even if we go back to the recompiler
1721             * without executing guest code in VT-x, the condition for clearing the flag is
1722             * already met and thus the cleared state is correct.
1723 */
1724 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1725 }
1726 }
1727
1728 /*
1729 * Check if we should inhibit NMI delivery.
1730 */
1731 if (CPUMIsGuestNmiBlocking(pVCpu))
1732 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1733
1734 /*
1735 * Validate.
1736 */
1737#ifdef VBOX_STRICT
1738 /* We don't support block-by-SMI yet.*/
1739 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1740
1741 /* Block-by-STI must not be set when interrupts are disabled. */
1742 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
1743 {
1744 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1745 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
1746 }
1747#endif
1748
1749 return fIntrState;
1750}
1751
1752
1753/**
1754 * Exports the exception intercepts required for guest execution in the VMCS.
1755 *
1756 * @param pVCpu The cross context virtual CPU structure.
1757 * @param pVmxTransient The VMX-transient structure.
1758 *
1759 * @remarks No-long-jump zone!!!
1760 */
1761static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1762{
1763 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1764 {
1765 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1766 if ( !pVmxTransient->fIsNestedGuest
1767 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1768 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1769 else
1770 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1771
1772 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1773 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1774 }
1775}
1776
1777
1778/**
1779 * Exports the guest's RIP into the guest-state area in the VMCS.
1780 *
1781 * @param pVCpu The cross context virtual CPU structure.
1782 *
1783 * @remarks No-long-jump zone!!!
1784 */
1785static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1786{
1787 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1788 {
1789 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1790
1791 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1792 AssertRC(rc);
1793
1794 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1795 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1796 }
1797}
1798
1799
1800/**
1801 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1802 *
1803 * @param pVCpu The cross context virtual CPU structure.
1804 * @param pVmxTransient The VMX-transient structure.
1805 *
1806 * @remarks No-long-jump zone!!!
1807 */
1808static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1809{
1810 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1811 {
1812 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1813
1814 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
1815 Let us assert it as such and use 32-bit VMWRITE. */
1816 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
1817 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
1818 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
1819 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
1820
1821#ifndef IN_NEM_DARWIN
1822 /*
1823 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1824 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1825 * can run the real-mode guest code under Virtual 8086 mode.
1826 */
1827 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1828 if (pVmcsInfo->RealMode.fRealOnV86Active)
1829 {
1830 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1831 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1832 Assert(!pVmxTransient->fIsNestedGuest);
1833 pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
1834 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
1835 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1836 }
1837#else
1838 RT_NOREF(pVmxTransient);
1839#endif
1840
1841 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
1842 AssertRC(rc);
1843
1844 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1845 Log4Func(("eflags=%#RX32\n", fEFlags.u32));
1846 }
1847}
1848
1849
1850#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1851/**
1852 * Copies the nested-guest VMCS to the shadow VMCS.
1853 *
1854 * @returns VBox status code.
1855 * @param pVCpu The cross context virtual CPU structure.
1856 * @param pVmcsInfo The VMCS info. object.
1857 *
1858 * @remarks No-long-jump zone!!!
1859 */
1860static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1861{
1862 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1863 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1864
1865 /*
1866 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1867 * current VMCS, as we may try saving guest lazy MSRs.
1868 *
1869 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1870 * calling the import VMCS code which is currently performing the guest MSR reads
1871 * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
1872 * and the rest of the VMX leave session machinery.
1873 */
1874 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1875
1876 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1877 if (RT_SUCCESS(rc))
1878 {
1879 /*
1880 * Copy all guest read/write VMCS fields.
1881 *
1882 * We don't check for VMWRITE failures here for performance reasons and
1883 * because they are not expected to fail, barring irrecoverable conditions
1884 * like hardware errors.
1885 */
1886 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1887 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1888 {
1889 uint64_t u64Val;
1890 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1891 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1892 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1893 }
1894
1895 /*
1896 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1897 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1898 */
1899 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1900 {
1901 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1902 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1903 {
1904 uint64_t u64Val;
1905 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1906 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1907 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1908 }
1909 }
1910
1911 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1912 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1913 }
1914
1915 ASMSetFlags(fEFlags);
1916 return rc;
1917}
1918
1919
1920/**
1921 * Copies the shadow VMCS to the nested-guest VMCS.
1922 *
1923 * @returns VBox status code.
1924 * @param pVCpu The cross context virtual CPU structure.
1925 * @param pVmcsInfo The VMCS info. object.
1926 *
1927 * @remarks Called with interrupts disabled.
1928 */
1929static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1930{
1931 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1932 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1933 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1934
1935 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1936 if (RT_SUCCESS(rc))
1937 {
1938 /*
1939 * Copy guest read/write fields from the shadow VMCS.
1940 * Guest read-only fields cannot be modified, so no need to copy them.
1941 *
1942 * We don't check for VMREAD failures here for performance reasons and
1943 * because they are not expected to fail, barring irrecoverable conditions
1944 * like hardware errors.
1945 */
1946 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1947 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1948 {
1949 uint64_t u64Val;
1950 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1951 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1952 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1953 }
1954
1955 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1956 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1957 }
1958 return rc;
1959}
1960
1961
1962/**
1963 * Enables VMCS shadowing for the given VMCS info. object.
1964 *
1965 * @param pVCpu The cross context virtual CPU structure.
1966 * @param pVmcsInfo The VMCS info. object.
1967 *
1968 * @remarks No-long-jump zone!!!
1969 */
1970static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1971{
1972 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1973 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1974 {
1975 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1976 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1977 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1978 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1979 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1980 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
1981 Log4Func(("Enabled\n"));
1982 }
1983}
1984
1985
1986/**
1987 * Disables VMCS shadowing for the given VMCS info. object.
1988 *
1989 * @param pVCpu The cross context virtual CPU structure.
1990 * @param pVmcsInfo The VMCS info. object.
1991 *
1992 * @remarks No-long-jump zone!!!
1993 */
1994static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1995{
1996 /*
1997 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
1998 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
1999 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
2000 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
2001 *
2002 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2003 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2004 */
2005 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2006 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2007 {
2008 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2009 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2010 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2011 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2012 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2013 Log4Func(("Disabled\n"));
2014 }
2015}
2016#endif
2017
2018
2019/**
2020 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2021 *
2022 * The guest FPU state is always pre-loaded, hence we don't need to bother about
2023 * sharing FPU-related CR0 bits between the guest and host.
2024 *
2025 * @returns VBox status code.
2026 * @param pVCpu The cross context virtual CPU structure.
2027 * @param pVmxTransient The VMX-transient structure.
2028 *
2029 * @remarks No-long-jump zone!!!
2030 */
2031static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2032{
2033 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2034 {
2035 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2036 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2037
2038 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2039 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2040 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2041 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2042 else
2043 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
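        /* Note (typical values, not guaranteed): MSR_IA32_VMX_CR0_FIXED0 is often 0x80000021
           (PG, NE and PE must be 1) and MSR_IA32_VMX_CR0_FIXED1 is often 0xffffffff (no bits
           forced to 0), so the fixed-bit adjustments below mostly force PE/PG/NE on unless
           unrestricted guest execution relaxes the PE/PG requirement. */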
2044
2045 if (!pVmxTransient->fIsNestedGuest)
2046 {
2047 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2048 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2049 uint64_t const u64ShadowCr0 = u64GuestCr0;
2050 Assert(!RT_HI_U32(u64GuestCr0));
2051
2052 /*
2053 * Setup VT-x's view of the guest CR0.
2054 */
2055 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2056 if (VM_IS_VMX_NESTED_PAGING(pVM))
2057 {
2058#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2059 if (CPUMIsGuestPagingEnabled(pVCpu))
2060 {
2061 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2062 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2063 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2064 }
2065 else
2066 {
2067 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2068 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2069 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2070 }
2071
2072 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2073 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2074 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2075#endif
2076 }
2077 else
2078 {
2079 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2080 u64GuestCr0 |= X86_CR0_WP;
2081 }
2082
2083 /*
2084 * Guest FPU bits.
2085 *
2086             * Since we always pre-load the guest FPU before VM-entry, there is no need to track lazy state
2087             * using CR0.TS.
2088             *
2089             * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2090             * set on the first CPUs to support VT-x; it makes no mention of this with regards to UX in the VM-entry checks.
2091 */
2092 u64GuestCr0 |= X86_CR0_NE;
2093
2094 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2095 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2096
2097 /*
2098 * Update exception intercepts.
2099 */
2100 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2101#ifndef IN_NEM_DARWIN
2102 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2103 {
2104 Assert(PDMVmmDevHeapIsEnabled(pVM));
2105 Assert(pVM->hm.s.vmx.pRealModeTSS);
2106 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2107 }
2108 else
2109#endif
2110 {
2111 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2112 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2113 if (fInterceptMF)
2114 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2115 }
2116
2117             /* Additional intercepts for debugging; define these yourself explicitly. */
2118#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2119 uXcptBitmap |= 0
2120 | RT_BIT(X86_XCPT_BP)
2121 | RT_BIT(X86_XCPT_DE)
2122 | RT_BIT(X86_XCPT_NM)
2123 | RT_BIT(X86_XCPT_TS)
2124 | RT_BIT(X86_XCPT_UD)
2125 | RT_BIT(X86_XCPT_NP)
2126 | RT_BIT(X86_XCPT_SS)
2127 | RT_BIT(X86_XCPT_GP)
2128 | RT_BIT(X86_XCPT_PF)
2129 | RT_BIT(X86_XCPT_MF)
2130 ;
2131#elif defined(HMVMX_ALWAYS_TRAP_PF)
2132 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2133#endif
2134 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2135 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2136 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2137 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2138 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2139
2140 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2141 u64GuestCr0 |= fSetCr0;
2142 u64GuestCr0 &= fZapCr0;
2143 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2144
2145 /* Commit the CR0 and related fields to the guest VMCS. */
2146 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2147 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2148 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2149 {
2150 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2151 AssertRC(rc);
2152 }
2153 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2154 {
2155 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2156 AssertRC(rc);
2157 }
2158
2159 /* Update our caches. */
2160 pVmcsInfo->u32ProcCtls = uProcCtls;
2161 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2162
2163 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2164 }
2165 else
2166 {
2167 /*
2168 * With nested-guests, we may have extended the guest/host mask here since we
2169 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2170 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2171 * originally supplied. We must copy those bits from the nested-guest CR0 into
2172 * the nested-guest CR0 read-shadow.
2173 */
2174 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2175 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2176 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2177 Assert(!RT_HI_U32(u64GuestCr0));
2178 Assert(u64GuestCr0 & X86_CR0_NE);
2179
2180 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2181 u64GuestCr0 |= fSetCr0;
2182 u64GuestCr0 &= fZapCr0;
2183 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2184
2185 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2186 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2187 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2188
2189 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2190 }
2191
2192 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2193 }
2194
2195 return VINF_SUCCESS;
2196}
2197
2198
2199/**
2200 * Exports the guest control registers (CR3, CR4) into the guest-state area
2201 * in the VMCS.
2202 *
2203 * @returns VBox strict status code.
2204 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2205 * without unrestricted guest access and the VMMDev is not presently
2206 * mapped (e.g. EFI32).
2207 *
2208 * @param pVCpu The cross context virtual CPU structure.
2209 * @param pVmxTransient The VMX-transient structure.
2210 *
2211 * @remarks No-long-jump zone!!!
2212 */
2213static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2214{
2215 int rc = VINF_SUCCESS;
2216 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2217
2218 /*
2219 * Guest CR2.
2220 * It's always loaded in the assembler code. Nothing to do here.
2221 */
2222
2223 /*
2224 * Guest CR3.
2225 */
2226 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2227 {
2228 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2229
2230 if (VM_IS_VMX_NESTED_PAGING(pVM))
2231 {
2232#ifndef IN_NEM_DARWIN
2233 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2234 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2235
2236 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2237 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2238 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2239 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2240
2241 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2242 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2243 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
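            /* For reference, the EPTP layout: bits 2:0 = EPT paging-structure memory type (6 = WB),
               bits 5:3 = EPT page-walk length minus 1 (3 for a 4-level walk), bit 6 = enable
               accessed/dirty flags, bits (MAXPHYADDR-1):12 = physical address of the EPT PML4 table. */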
2244
2245 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2246 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2247 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2248 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2249 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2250 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2251 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2252
2253 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2254 AssertRC(rc);
2255#endif
2256
2257 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2258 uint64_t u64GuestCr3 = pCtx->cr3;
2259 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2260 || CPUMIsGuestPagingEnabledEx(pCtx))
2261 {
2262 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
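                /* (With EPT and PAE paging the CPU loads the four PDPTEs from these VMCS fields on
                   VM-entry rather than reading them from guest memory via CR3, so they must be kept
                   in sync here.) */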
2263 if (CPUMIsGuestInPAEModeEx(pCtx))
2264 {
2265 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2266 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2267 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2268 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2269 }
2270
2271 /*
2272                 * With nested paging, the guest's view of its CR3 is left unblemished when the
2273                 * guest is using paging, or when we have unrestricted guest execution to handle
2274                 * the guest while it's not using paging.
2275 */
2276 }
2277#ifndef IN_NEM_DARWIN
2278 else
2279 {
2280 /*
2281 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2282 * thinks it accesses physical memory directly, we use our identity-mapped
2283 * page table to map guest-linear to guest-physical addresses. EPT takes care
2284 * of translating it to host-physical addresses.
2285 */
2286 RTGCPHYS GCPhys;
2287 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2288
2289 /* We obtain it here every time as the guest could have relocated this PCI region. */
2290 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2291 if (RT_SUCCESS(rc))
2292 { /* likely */ }
2293 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2294 {
2295 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2296 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2297 }
2298 else
2299 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2300
2301 u64GuestCr3 = GCPhys;
2302 }
2303#endif
2304
2305 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2306 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2307 AssertRC(rc);
2308 }
2309 else
2310 {
2311 Assert(!pVmxTransient->fIsNestedGuest);
2312 /* Non-nested paging case, just use the hypervisor's CR3. */
2313 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2314
2315 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2316 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2317 AssertRC(rc);
2318 }
2319
2320 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2321 }
2322
2323 /*
2324 * Guest CR4.
2325     * ASSUMES this is done every time we get in from ring-3! (XCR0)
2326 */
2327 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2328 {
2329 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2330 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2331
2332 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2333 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2334
2335 /*
2336 * With nested-guests, we may have extended the guest/host mask here (since we
2337 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
2338 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2339 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2340 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2341 */
2342 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2343 uint64_t u64GuestCr4 = pCtx->cr4;
2344 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2345 ? pCtx->cr4
2346 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2347 Assert(!RT_HI_U32(u64GuestCr4));
2348
2349#ifndef IN_NEM_DARWIN
2350 /*
2351 * Setup VT-x's view of the guest CR4.
2352 *
2353 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2354 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2355 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2356 *
2357 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2358 */
2359 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2360 {
2361 Assert(pVM->hm.s.vmx.pRealModeTSS);
2362 Assert(PDMVmmDevHeapIsEnabled(pVM));
2363 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2364 }
2365#endif
2366
2367 if (VM_IS_VMX_NESTED_PAGING(pVM))
2368 {
2369 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2370 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2371 {
2372 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2373 u64GuestCr4 |= X86_CR4_PSE;
2374 /* Our identity mapping is a 32-bit page directory. */
2375 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2376 }
2377 /* else use guest CR4.*/
2378 }
2379 else
2380 {
2381 Assert(!pVmxTransient->fIsNestedGuest);
2382
2383 /*
2384             * The shadow paging modes and guest paging modes are different; the shadow is in accordance with the host
2385             * paging mode, and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2386 */
2387 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2388 {
2389 case PGMMODE_REAL: /* Real-mode. */
2390 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2391 case PGMMODE_32_BIT: /* 32-bit paging. */
2392 {
2393 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2394 break;
2395 }
2396
2397 case PGMMODE_PAE: /* PAE paging. */
2398 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2399 {
2400 u64GuestCr4 |= X86_CR4_PAE;
2401 break;
2402 }
2403
2404 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2405 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2406 {
2407#ifdef VBOX_WITH_64_BITS_GUESTS
2408 /* For our assumption in vmxHCShouldSwapEferMsr. */
2409 Assert(u64GuestCr4 & X86_CR4_PAE);
2410 break;
2411#endif
2412 }
2413 default:
2414 AssertFailed();
2415 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2416 }
2417 }
2418
2419 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2420 u64GuestCr4 |= fSetCr4;
2421 u64GuestCr4 &= fZapCr4;
2422
2423 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2424 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2425 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2426
2427#ifndef IN_NEM_DARWIN
2428 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2429 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2430 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2431 {
2432 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2433 hmR0VmxUpdateStartVmFunction(pVCpu);
2434 }
2435#endif
2436
2437 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2438
2439 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2440 }
2441 return rc;
2442}
2443
2444
2445#ifdef VBOX_STRICT
2446/**
2447 * Strict function to validate segment registers.
2448 *
2449 * @param pVCpu The cross context virtual CPU structure.
2450 * @param pVmcsInfo The VMCS info. object.
2451 *
2452 * @remarks Will import guest CR0 on strict builds during validation of
2453 * segments.
2454 */
2455static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2456{
2457 /*
2458 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2459 *
2460     * The reason we check for attribute value 0 in this function and not just the unusable bit is
2461     * that vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2462     * unusable bit and doesn't change the guest-context value.
2463 */
2464 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2465 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2466 vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2467 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2468 && ( !CPUMIsGuestInRealModeEx(pCtx)
2469 && !CPUMIsGuestInV86ModeEx(pCtx)))
2470 {
2471 /* Protected mode checks */
2472 /* CS */
2473 Assert(pCtx->cs.Attr.n.u1Present);
2474 Assert(!(pCtx->cs.Attr.u & 0xf00));
2475 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2476 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2477 || !(pCtx->cs.Attr.n.u1Granularity));
2478 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2479 || (pCtx->cs.Attr.n.u1Granularity));
2480 /* CS cannot be loaded with NULL in protected mode. */
2481 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2482 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2483 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2484 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2485 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2486 else
2487             AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2488 /* SS */
2489 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2490 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2491 if ( !(pCtx->cr0 & X86_CR0_PE)
2492 || pCtx->cs.Attr.n.u4Type == 3)
2493 {
2494 Assert(!pCtx->ss.Attr.n.u2Dpl);
2495 }
2496 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2497 {
2498 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2499 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2500 Assert(pCtx->ss.Attr.n.u1Present);
2501 Assert(!(pCtx->ss.Attr.u & 0xf00));
2502 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2503 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2504 || !(pCtx->ss.Attr.n.u1Granularity));
2505 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2506 || (pCtx->ss.Attr.n.u1Granularity));
2507 }
2508 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2509 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2510 {
2511 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2512 Assert(pCtx->ds.Attr.n.u1Present);
2513 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2514 Assert(!(pCtx->ds.Attr.u & 0xf00));
2515 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2516 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2517 || !(pCtx->ds.Attr.n.u1Granularity));
2518 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2519 || (pCtx->ds.Attr.n.u1Granularity));
2520 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2521 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2522 }
2523 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2524 {
2525 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2526 Assert(pCtx->es.Attr.n.u1Present);
2527 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2528 Assert(!(pCtx->es.Attr.u & 0xf00));
2529 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2530 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2531 || !(pCtx->es.Attr.n.u1Granularity));
2532 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2533 || (pCtx->es.Attr.n.u1Granularity));
2534 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2535 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2536 }
2537 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2538 {
2539 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2540 Assert(pCtx->fs.Attr.n.u1Present);
2541 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2542 Assert(!(pCtx->fs.Attr.u & 0xf00));
2543 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2544 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2545 || !(pCtx->fs.Attr.n.u1Granularity));
2546 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2547 || (pCtx->fs.Attr.n.u1Granularity));
2548 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2549 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2550 }
2551 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2552 {
2553 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2554 Assert(pCtx->gs.Attr.n.u1Present);
2555 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2556 Assert(!(pCtx->gs.Attr.u & 0xf00));
2557 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2558 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2559 || !(pCtx->gs.Attr.n.u1Granularity));
2560 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2561 || (pCtx->gs.Attr.n.u1Granularity));
2562 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2563 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2564 }
2565 /* 64-bit capable CPUs. */
2566 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2567 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2568 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2569 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2570 }
2571 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2572 || ( CPUMIsGuestInRealModeEx(pCtx)
2573 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2574 {
2575 /* Real and v86 mode checks. */
2576         /* vmxHCExportGuestSegReg() writes the modified values into the VMCS. We want what we're feeding to VT-x. */
2577 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2578#ifndef IN_NEM_DARWIN
2579 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2580 {
2581 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2582 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2583 }
2584 else
2585#endif
2586 {
2587 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2588 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2589 }
2590
2591 /* CS */
2592         AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#RX64 %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2593 Assert(pCtx->cs.u32Limit == 0xffff);
2594 Assert(u32CSAttr == 0xf3);
2595 /* SS */
2596 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2597 Assert(pCtx->ss.u32Limit == 0xffff);
2598 Assert(u32SSAttr == 0xf3);
2599 /* DS */
2600 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2601 Assert(pCtx->ds.u32Limit == 0xffff);
2602 Assert(u32DSAttr == 0xf3);
2603 /* ES */
2604 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2605 Assert(pCtx->es.u32Limit == 0xffff);
2606 Assert(u32ESAttr == 0xf3);
2607 /* FS */
2608 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2609 Assert(pCtx->fs.u32Limit == 0xffff);
2610 Assert(u32FSAttr == 0xf3);
2611 /* GS */
2612 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2613 Assert(pCtx->gs.u32Limit == 0xffff);
2614 Assert(u32GSAttr == 0xf3);
2615 /* 64-bit capable CPUs. */
2616 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2617 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2618 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2619 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2620 }
2621}
2622#endif /* VBOX_STRICT */
2623
2624
2625/**
2626 * Exports a guest segment register into the guest-state area in the VMCS.
2627 *
2628 * @returns VBox status code.
2629 * @param pVCpu The cross context virtual CPU structure.
2630 * @param pVmcsInfo The VMCS info. object.
2631 * @param iSegReg The segment register number (X86_SREG_XXX).
2632 * @param pSelReg Pointer to the segment selector.
2633 *
2634 * @remarks No-long-jump zone!!!
2635 */
2636static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2637{
2638 Assert(iSegReg < X86_SREG_COUNT);
2639
2640 uint32_t u32Access = pSelReg->Attr.u;
2641#ifndef IN_NEM_DARWIN
2642 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2643#endif
2644 {
2645 /*
2646      * The way to differentiate whether this is really a null selector or just a selector
2647      * loaded with 0 in real-mode is by using the segment attributes. A selector
2648      * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
2649      * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure that
2650      * NULL selectors loaded in protected-mode have their attributes as 0.
2651 */
2652 if (u32Access)
2653 { }
2654 else
2655 u32Access = X86DESCATTR_UNUSABLE;
2656 }
2657#ifndef IN_NEM_DARWIN
2658 else
2659 {
2660 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2661 u32Access = 0xf3;
2662 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2663 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2664 RT_NOREF_PV(pVCpu);
2665 }
2666#else
2667 RT_NOREF(pVmcsInfo);
2668#endif
2669
2670 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
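    /* Note: "ESCSSSDSFSGS" packs two-character segment names in X86_SREG_XXX order (ES=0, CS=1,
       SS=2, DS=3, FS=4, GS=5); the "%.2s" below prints the pair at offset iSegReg * 2. */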
2671 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2672               ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2673
2674 /*
2675 * Commit it to the VMCS.
2676 */
2677 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2678 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2679 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2680 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2681 return VINF_SUCCESS;
2682}
2683
2684
2685/**
2686 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2687 * area in the VMCS.
2688 *
2689 * @returns VBox status code.
2690 * @param pVCpu The cross context virtual CPU structure.
2691 * @param pVmxTransient The VMX-transient structure.
2692 *
2693 * @remarks Will import guest CR0 on strict builds during validation of
2694 * segments.
2695 * @remarks No-long-jump zone!!!
2696 */
2697static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2698{
2699 int rc = VERR_INTERNAL_ERROR_5;
2700#ifndef IN_NEM_DARWIN
2701 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2702#endif
2703 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2704 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2705#ifndef IN_NEM_DARWIN
2706 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2707#endif
2708
2709 /*
2710 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2711 */
2712 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2713 {
2714 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2715 {
2716 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2717#ifndef IN_NEM_DARWIN
2718 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2719 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2720#endif
2721 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2722 AssertRC(rc);
2723 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2724 }
2725
2726 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2727 {
2728 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2729#ifndef IN_NEM_DARWIN
2730 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2731 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2732#endif
2733 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2734 AssertRC(rc);
2735 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2736 }
2737
2738 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2739 {
2740 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2741#ifndef IN_NEM_DARWIN
2742 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2743 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2744#endif
2745 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2746 AssertRC(rc);
2747 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2748 }
2749
2750 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2751 {
2752 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2753#ifndef IN_NEM_DARWIN
2754 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2755 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2756#endif
2757 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2758 AssertRC(rc);
2759 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2760 }
2761
2762 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2763 {
2764 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2765#ifndef IN_NEM_DARWIN
2766 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2767 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2768#endif
2769 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2770 AssertRC(rc);
2771 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2772 }
2773
2774 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2775 {
2776 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2777#ifndef IN_NEM_DARWIN
2778 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2779 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2780#endif
2781 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2782 AssertRC(rc);
2783 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2784 }
2785
2786#ifdef VBOX_STRICT
2787 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2788#endif
2789 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2790 pCtx->cs.Attr.u));
2791 }
2792
2793 /*
2794 * Guest TR.
2795 */
2796 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2797 {
2798 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2799
2800 /*
2801 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2802 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2803 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2804 */
2805 uint16_t u16Sel;
2806 uint32_t u32Limit;
2807 uint64_t u64Base;
2808 uint32_t u32AccessRights;
2809#ifndef IN_NEM_DARWIN
2810 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2811#endif
2812 {
2813 u16Sel = pCtx->tr.Sel;
2814 u32Limit = pCtx->tr.u32Limit;
2815 u64Base = pCtx->tr.u64Base;
2816 u32AccessRights = pCtx->tr.Attr.u;
2817 }
2818#ifndef IN_NEM_DARWIN
2819 else
2820 {
2821 Assert(!pVmxTransient->fIsNestedGuest);
2822 Assert(pVM->hm.s.vmx.pRealModeTSS);
2823 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2824
2825 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2826 RTGCPHYS GCPhys;
2827 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2828 AssertRCReturn(rc, rc);
2829
2830 X86DESCATTR DescAttr;
2831 DescAttr.u = 0;
2832 DescAttr.n.u1Present = 1;
2833 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2834
2835 u16Sel = 0;
2836 u32Limit = HM_VTX_TSS_SIZE;
2837 u64Base = GCPhys;
2838 u32AccessRights = DescAttr.u;
2839 }
2840#endif
2841
2842 /* Validate. */
2843 Assert(!(u16Sel & RT_BIT(2)));
2844 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2845 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2846 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2847 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2848 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2849 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2850 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2851 Assert( (u32Limit & 0xfff) == 0xfff
2852 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2853 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2854 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2855
2856 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2857 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2858 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2859 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2860
2861 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2862 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2863 }
2864
2865 /*
2866 * Guest GDTR.
2867 */
2868 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2869 {
2870 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2871
2872 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2873 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2874
2875 /* Validate. */
2876 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2877
2878 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2879 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2880 }
2881
2882 /*
2883 * Guest LDTR.
2884 */
2885 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2886 {
2887 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2888
2889         /* The unusable bit is specific to VT-x; if it's a null selector, mark it as an unusable segment. */
2890 uint32_t u32Access;
2891 if ( !pVmxTransient->fIsNestedGuest
2892 && !pCtx->ldtr.Attr.u)
2893 u32Access = X86DESCATTR_UNUSABLE;
2894 else
2895 u32Access = pCtx->ldtr.Attr.u;
2896
2897 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2898 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2899 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2900 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2901
2902 /* Validate. */
2903 if (!(u32Access & X86DESCATTR_UNUSABLE))
2904 {
2905 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2906 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2907 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2908 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2909 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2910 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2911 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2912 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2913 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2914 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2915 }
2916
2917 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2918 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2919 }
2920
2921 /*
2922 * Guest IDTR.
2923 */
2924 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2925 {
2926 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2927
2928 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2929 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2930
2931 /* Validate. */
2932 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2933
2934 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2935 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2936 }
2937
2938 return VINF_SUCCESS;
2939}
2940
2941
2942/**
2943 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2944 * VM-exit interruption info type.
2945 *
2946 * @returns The IEM exception flags.
2947 * @param uVector The event vector.
2948 * @param uVmxEventType The VMX event type.
2949 *
2950 * @remarks This function currently only constructs flags required for
 2951 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2952 * and CR2 aspects of an exception are not included).
2953 */
2954static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2955{
2956 uint32_t fIemXcptFlags;
2957 switch (uVmxEventType)
2958 {
2959 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2960 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2961 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2962 break;
2963
2964 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2965 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2966 break;
2967
2968 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2969 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2970 break;
2971
2972 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2973 {
2974 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2975 if (uVector == X86_XCPT_BP)
2976 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2977 else if (uVector == X86_XCPT_OF)
2978 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
2979 else
2980 {
2981 fIemXcptFlags = 0;
2982 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
2983 }
2984 break;
2985 }
2986
2987 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
2988 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2989 break;
2990
2991 default:
2992 fIemXcptFlags = 0;
2993 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
2994 break;
2995 }
2996 return fIemXcptFlags;
2997}
2998
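/*
 * Illustrative sketch only: a caller re-evaluating a pending event would typically crack
 * the IDT-vectoring information field first and then ask for the IEM flags, roughly like
 * this (the transient-structure member and accessor macro names are assumed from their
 * use elsewhere in VirtualBox):
 *
 *     uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
 *     uint8_t  const uIdtVector     = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
 *     uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
 *     uint32_t const fIdtFlags      = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
 */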
2999
3000/**
3001 * Sets an event as a pending event to be injected into the guest.
3002 *
3003 * @param pVCpu The cross context virtual CPU structure.
3004 * @param u32IntInfo The VM-entry interruption-information field.
3005 * @param cbInstr The VM-entry instruction length in bytes (for
3006 * software interrupts, exceptions and privileged
3007 * software exceptions).
3008 * @param u32ErrCode The VM-entry exception error code.
3009 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3010 * page-fault.
3011 */
3012DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3013 RTGCUINTPTR GCPtrFaultAddress)
3014{
3015 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3016 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3017 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3018 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3019 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3020 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3021}
3022
3023
3024/**
3025 * Sets an external interrupt as pending-for-injection into the VM.
3026 *
3027 * @param pVCpu The cross context virtual CPU structure.
3028 * @param u8Interrupt The external interrupt vector.
3029 */
3030DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3031{
3032 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3033 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3034 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3035 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3036 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3037}
3038
3039
3040/**
3041 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3042 *
3043 * @param pVCpu The cross context virtual CPU structure.
3044 */
3045DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3046{
3047 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3048 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3049 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3050 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3051 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3052}
3053
3054
3055/**
3056 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3057 *
3058 * @param pVCpu The cross context virtual CPU structure.
3059 */
3060DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3061{
3062 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3063 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3064 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3065 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3066 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3067}
3068
3069
3070/**
3071 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3072 *
3073 * @param pVCpu The cross context virtual CPU structure.
3074 */
3075DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3076{
3077 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3078 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3079 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3080 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3081 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3082}
3083
3084
3085/**
3086 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3087 *
3088 * @param pVCpu The cross context virtual CPU structure.
3089 */
3090DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3091{
3092 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3093 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3094 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3095 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3096 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3097}
3098
3099
3100#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3101/**
3102 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3103 *
3104 * @param pVCpu The cross context virtual CPU structure.
3105 * @param u32ErrCode The error code for the general-protection exception.
3106 */
3107DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3108{
3109 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3110 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3111 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3112 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3113 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3114}
3115
3116
3117/**
3118 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3119 *
3120 * @param pVCpu The cross context virtual CPU structure.
3121 * @param u32ErrCode The error code for the stack exception.
3122 */
3123DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3124{
3125 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3126 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3127 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3128 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3129 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3130}
3131#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3132
3133
3134/**
3135 * Fixes up attributes for the specified segment register.
3136 *
3137 * @param pVCpu The cross context virtual CPU structure.
3138 * @param pSelReg The segment register that needs fixing.
3139 * @param pszRegName The register name (for logging and assertions).
3140 */
3141static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3142{
3143 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3144
3145 /*
3146 * If VT-x marks the segment as unusable, most other bits remain undefined:
3147 * - For CS the L, D and G bits have meaning.
3148 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3149 * - For the remaining data segments no bits are defined.
3150 *
 3151 * The present bit and the unusable bit have been observed to be set at the
3152 * same time (the selector was supposed to be invalid as we started executing
3153 * a V8086 interrupt in ring-0).
3154 *
 3155 * What should be important for the rest of the VBox code is that the P bit is
3156 * cleared. Some of the other VBox code recognizes the unusable bit, but
 3157 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3158 * safe side here, we'll strip off P and other bits we don't care about. If
3159 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3160 *
3161 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3162 */
3163#ifdef VBOX_STRICT
3164 uint32_t const uAttr = pSelReg->Attr.u;
3165#endif
3166
3167 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3168 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3169 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3170
3171#ifdef VBOX_STRICT
3172# ifndef IN_NEM_DARWIN
3173 VMMRZCallRing3Disable(pVCpu);
3174# endif
3175 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3176# ifdef DEBUG_bird
3177 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3178 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3179 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3180# endif
3181# ifndef IN_NEM_DARWIN
3182 VMMRZCallRing3Enable(pVCpu);
3183# endif
3184 NOREF(uAttr);
3185#endif
3186 RT_NOREF2(pVCpu, pszRegName);
3187}
3188
3189
3190/**
3191 * Imports a guest segment register from the current VMCS into the guest-CPU
3192 * context.
3193 *
3194 * @param pVCpu The cross context virtual CPU structure.
3195 * @tparam a_iSegReg The segment register number (X86_SREG_XXX).
3196 *
3197 * @remarks Called with interrupts and/or preemption disabled.
3198 */
3199template<uint32_t const a_iSegReg>
3200DECLINLINE(void) vmxHCImportGuestSegReg(PVMCPUCC pVCpu)
3201{
3202 AssertCompile(a_iSegReg < X86_SREG_COUNT);
 3203 /* Check that the macros we depend upon here and in the exporting parent function work: */
3204#define MY_SEG_VMCS_FIELD(a_FieldPrefix, a_FieldSuff) \
3205 ( a_iSegReg == X86_SREG_ES ? a_FieldPrefix ## ES ## a_FieldSuff \
3206 : a_iSegReg == X86_SREG_CS ? a_FieldPrefix ## CS ## a_FieldSuff \
3207 : a_iSegReg == X86_SREG_SS ? a_FieldPrefix ## SS ## a_FieldSuff \
3208 : a_iSegReg == X86_SREG_DS ? a_FieldPrefix ## DS ## a_FieldSuff \
3209 : a_iSegReg == X86_SREG_FS ? a_FieldPrefix ## FS ## a_FieldSuff \
3210 : a_iSegReg == X86_SREG_GS ? a_FieldPrefix ## GS ## a_FieldSuff : 0)
3211 AssertCompile(VMX_VMCS_GUEST_SEG_BASE(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS_GUEST_,_BASE));
3212 AssertCompile(VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS16_GUEST_,_SEL));
3213 AssertCompile(VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_LIMIT));
3214 AssertCompile(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_ACCESS_RIGHTS));
3215
3216 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[a_iSegReg];
3217
3218 uint16_t u16Sel;
3219 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg), &u16Sel); AssertRC(rc);
3220 pSelReg->Sel = u16Sel;
3221 pSelReg->ValidSel = u16Sel;
3222
3223 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3224 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(a_iSegReg), &pSelReg->u64Base); AssertRC(rc);
3225
3226 uint32_t u32Attr;
3227 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg), &u32Attr); AssertRC(rc);
3228 pSelReg->Attr.u = u32Attr;
3229 if (u32Attr & X86DESCATTR_UNUSABLE)
3230 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + a_iSegReg * 3);
3231
3232 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3233}
3234
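/*
 * Illustrative usage sketch: the segment register is selected at compile time through the
 * template parameter, so callers instantiate one copy per register, e.g.:
 *
 *     vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
 *     vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
 *
 * as done by the state import code further down in this file.
 */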
3235
3236/**
3237 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3238 *
3239 * @param pVCpu The cross context virtual CPU structure.
3240 *
3241 * @remarks Called with interrupts and/or preemption disabled.
3242 */
3243DECLINLINE(void) vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3244{
3245 uint16_t u16Sel;
3246 uint64_t u64Base;
3247 uint32_t u32Limit, u32Attr;
3248 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3249 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3250 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3251 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3252
3253 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3254 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3255 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3256 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3257 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3258 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3259 if (u32Attr & X86DESCATTR_UNUSABLE)
3260 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3261}
3262
3263
3264/**
3265 * Imports the guest TR from the current VMCS into the guest-CPU context.
3266 *
3267 * @param pVCpu The cross context virtual CPU structure.
3268 *
3269 * @remarks Called with interrupts and/or preemption disabled.
3270 */
3271DECLINLINE(void) vmxHCImportGuestTr(PVMCPUCC pVCpu)
3272{
3273 uint16_t u16Sel;
3274 uint64_t u64Base;
3275 uint32_t u32Limit, u32Attr;
3276 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3277 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3278 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3279 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3280
3281 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3282 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3283 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3284 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3285 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3286 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3287 /* TR is the only selector that can never be unusable. */
3288 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3289}
3290
3291
3292/**
3293 * Core: Imports the guest RIP from the VMCS back into the guest-CPU context.
3294 *
3295 * @returns The RIP value.
3296 * @param pVCpu The cross context virtual CPU structure.
3297 *
3298 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3299 * @remarks Do -not- call this function directly!
3300 */
3301DECL_FORCE_INLINE(uint64_t) vmxHCImportGuestCoreRip(PVMCPUCC pVCpu)
3302{
3303 uint64_t u64Val;
3304 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3305 AssertRC(rc);
3306
3307 pVCpu->cpum.GstCtx.rip = u64Val;
3308
3309 return u64Val;
3310}
3311
3312
3313/**
3314 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3315 *
3316 * @param pVCpu The cross context virtual CPU structure.
3317 *
3318 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3319 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3320 * instead!!!
3321 */
3322DECLINLINE(void) vmxHCImportGuestRip(PVMCPUCC pVCpu)
3323{
3324 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
3325 {
3326 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3327 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3328 }
3329}
3330
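/*
 * Note on the fExtrn convention used by these import helpers: a set CPUMCTX_EXTRN_XXX bit
 * in pVCpu->cpum.GstCtx.fExtrn means that piece of guest state still lives only in the
 * VMCS, so each helper performs the VMCS read only while its bit is set and clears the
 * bit once the value has been copied into the guest-CPU context.
 */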
3331
3332/**
3333 * Core: Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3334 *
3335 * @param pVCpu The cross context virtual CPU structure.
3336 * @param pVmcsInfo The VMCS info. object.
3337 *
3338 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3339 * @remarks Do -not- call this function directly!
3340 */
3341DECL_FORCE_INLINE(void) vmxHCImportGuestCoreRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3342{
3343 uint64_t u64Val;
3344 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
3345 AssertRC(rc);
3346
3347 pVCpu->cpum.GstCtx.rflags.u64 = u64Val;
3348#ifndef IN_NEM_DARWIN
3349 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3350 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
3351 {
3352 pVCpu->cpum.GstCtx.eflags.Bits.u1VM = 0;
3353 pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3354 }
3355#else
3356 RT_NOREF(pVmcsInfo);
3357#endif
3358}
3359
3360
3361/**
3362 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3363 *
3364 * @param pVCpu The cross context virtual CPU structure.
3365 * @param pVmcsInfo The VMCS info. object.
3366 *
3367 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3368 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3369 * instead!!!
3370 */
3371DECLINLINE(void) vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3372{
3373 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RFLAGS)
3374 {
3375 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3376 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3377 }
3378}
3379
3380
3381/**
3382 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3383 * context.
3384 *
 3385 * @note May import RIP and RFLAGS if interrupts or NMIs are blocked.
3386 *
3387 * @param pVCpu The cross context virtual CPU structure.
3388 * @param pVmcsInfo The VMCS info. object.
3389 *
3390 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3391 * do not log!
3392 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3393 * instead!!!
3394 */
3395DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3396{
3397 uint32_t u32Val;
3398 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3399 if (!u32Val)
3400 {
3401 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3402 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3403/** @todo r=bird: This is a call which isn't necessary most of the time, yet this
 3404 * path is taken on basically all exits. Try to find a way to eliminate it. */
3405 CPUMSetGuestNmiBlocking(pVCpu, false);
3406 }
3407 else
3408 {
3409/** @todo consider this branch for non-inlining. */
3410 /*
3411 * We must import RIP here to set our EM interrupt-inhibited state.
3412 * We also import RFLAGS as our code that evaluates pending interrupts
3413 * before VM-entry requires it.
3414 */
3415 vmxHCImportGuestRip(pVCpu);
3416 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3417
3418 if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3419 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
3420 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3421 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3422
3423 bool const fNmiBlocking = RT_BOOL(u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
3424 CPUMSetGuestNmiBlocking(pVCpu, fNmiBlocking);
3425 }
3426}
3427
3428
3429/**
3430 * Worker for VMXR0ImportStateOnDemand.
3431 *
3432 * @returns VBox status code.
3433 * @param pVCpu The cross context virtual CPU structure.
3434 * @param pVmcsInfo The VMCS info. object.
3435 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3436 */
3437static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3438{
3439 int rc = VINF_SUCCESS;
3440 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3441 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3442 uint32_t u32Val;
3443
3444 /*
 3445 * Note! This is a hack to work around a mysterious BSOD observed with release builds
3446 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3447 * neither are other host platforms.
3448 *
3449 * Committing this temporarily as it prevents BSOD.
3450 *
3451 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3452 */
3453#ifdef RT_OS_WINDOWS
3454 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3455 return VERR_HM_IPE_1;
3456#endif
3457
3458 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3459
3460#ifndef IN_NEM_DARWIN
3461 /*
3462 * We disable interrupts to make the updating of the state and in particular
 3463 * the fExtrn modification atomic wrt preemption hooks.
3464 */
3465 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3466#endif
3467
3468 fWhat &= pCtx->fExtrn;
3469 if (fWhat)
3470 {
3471 do
3472 {
3473 if (fWhat & CPUMCTX_EXTRN_RIP)
3474 vmxHCImportGuestRip(pVCpu);
3475
3476 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3477 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3478
3479 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3480 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3481 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3482
3483 if (fWhat & CPUMCTX_EXTRN_RSP)
3484 {
3485 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3486 AssertRC(rc);
3487 }
3488
3489 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3490 {
3491 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3492#ifndef IN_NEM_DARWIN
3493 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3494#else
3495 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3496#endif
3497 if (fWhat & CPUMCTX_EXTRN_CS)
3498 {
3499 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3500 vmxHCImportGuestRip(pVCpu); /** @todo WTF? */
3501 if (fRealOnV86Active)
3502 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3503 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3504 }
3505 if (fWhat & CPUMCTX_EXTRN_SS)
3506 {
3507 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3508 if (fRealOnV86Active)
3509 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3510 }
3511 if (fWhat & CPUMCTX_EXTRN_DS)
3512 {
3513 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3514 if (fRealOnV86Active)
3515 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3516 }
3517 if (fWhat & CPUMCTX_EXTRN_ES)
3518 {
3519 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3520 if (fRealOnV86Active)
3521 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3522 }
3523 if (fWhat & CPUMCTX_EXTRN_FS)
3524 {
3525 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3526 if (fRealOnV86Active)
3527 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3528 }
3529 if (fWhat & CPUMCTX_EXTRN_GS)
3530 {
3531 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3532 if (fRealOnV86Active)
3533 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3534 }
3535 }
3536
3537 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3538 {
3539 if (fWhat & CPUMCTX_EXTRN_LDTR)
3540 vmxHCImportGuestLdtr(pVCpu);
3541
3542 if (fWhat & CPUMCTX_EXTRN_GDTR)
3543 {
3544 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3545 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3546 pCtx->gdtr.cbGdt = u32Val;
3547 }
3548
3549 /* Guest IDTR. */
3550 if (fWhat & CPUMCTX_EXTRN_IDTR)
3551 {
3552 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3553 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3554 pCtx->idtr.cbIdt = u32Val;
3555 }
3556
3557 /* Guest TR. */
3558 if (fWhat & CPUMCTX_EXTRN_TR)
3559 {
3560#ifndef IN_NEM_DARWIN
3561 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
 3562 so we don't need to import that one. */
3563 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3564#endif
3565 vmxHCImportGuestTr(pVCpu);
3566 }
3567 }
3568
3569 if (fWhat & CPUMCTX_EXTRN_DR7)
3570 {
3571#ifndef IN_NEM_DARWIN
3572 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3573#endif
3574 {
3575 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3576 AssertRC(rc);
3577 }
3578 }
3579
3580 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3581 {
3582 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3583 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3584 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3585 pCtx->SysEnter.cs = u32Val;
3586 }
3587
3588#ifndef IN_NEM_DARWIN
3589 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3590 {
3591 if ( pVM->hmr0.s.fAllow64BitGuests
3592 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3593 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3594 }
3595
3596 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3597 {
3598 if ( pVM->hmr0.s.fAllow64BitGuests
3599 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3600 {
3601 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3602 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3603 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3604 }
3605 }
3606
3607 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3608 {
3609 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3610 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3611 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3612 Assert(pMsrs);
3613 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3614 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3615 for (uint32_t i = 0; i < cMsrs; i++)
3616 {
3617 uint32_t const idMsr = pMsrs[i].u32Msr;
3618 switch (idMsr)
3619 {
3620 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3621 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3622 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3623 default:
3624 {
3625 uint32_t idxLbrMsr;
3626 if (VM_IS_VMX_LBR(pVM))
3627 {
3628 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3629 {
3630 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3631 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3632 break;
3633 }
3634 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3635 {
 3636 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3637 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3638 break;
3639 }
3640 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3641 {
3642 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3643 break;
3644 }
3645 /* Fallthru (no break) */
3646 }
3647 pCtx->fExtrn = 0;
3648 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3649 ASMSetFlags(fEFlags);
3650 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3651 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3652 }
3653 }
3654 }
3655 }
3656#endif
3657
3658 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3659 {
3660 if (fWhat & CPUMCTX_EXTRN_CR0)
3661 {
3662 uint64_t u64Cr0;
3663 uint64_t u64Shadow;
3664 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3665 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3666#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3667 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3668 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3669#else
3670 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3671 {
3672 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3673 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3674 }
3675 else
3676 {
3677 /*
3678 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3679 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3680 * re-construct CR0. See @bugref{9180#c95} for details.
3681 */
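 /* In other words: bits intercepted by the nested hypervisor are taken from the
 nested-guest VMCS (its guest CR0 masked by its CR0 mask), bits only we intercept
 come from our CR0 read shadow, and all remaining bits come straight from the
 hardware CR0 value read above. */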
3682 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3683 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3684 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3685 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3686 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3687 }
3688#endif
3689#ifndef IN_NEM_DARWIN
3690 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3691#endif
3692 CPUMSetGuestCR0(pVCpu, u64Cr0);
3693#ifndef IN_NEM_DARWIN
3694 VMMRZCallRing3Enable(pVCpu);
3695#endif
3696 }
3697
3698 if (fWhat & CPUMCTX_EXTRN_CR4)
3699 {
3700 uint64_t u64Cr4;
3701 uint64_t u64Shadow;
3702 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3703 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3704#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3705 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3706 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3707#else
3708 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3709 {
3710 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3711 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3712 }
3713 else
3714 {
3715 /*
3716 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3717 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3718 * re-construct CR4. See @bugref{9180#c95} for details.
3719 */
3720 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3721 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3722 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3723 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3724 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3725 }
3726#endif
3727 pCtx->cr4 = u64Cr4;
3728 }
3729
3730 if (fWhat & CPUMCTX_EXTRN_CR3)
3731 {
3732 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3733 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3734 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3735 && CPUMIsGuestPagingEnabledEx(pCtx)))
3736 {
3737 uint64_t u64Cr3;
3738 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3739 if (pCtx->cr3 != u64Cr3)
3740 {
3741 pCtx->cr3 = u64Cr3;
3742 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3743 }
3744
3745 /*
3746 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3747 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3748 */
3749 if (CPUMIsGuestInPAEModeEx(pCtx))
3750 {
3751 X86PDPE aPaePdpes[4];
3752 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3753 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3754 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3755 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3756 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3757 {
3758 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3759 /* PGM now updates PAE PDPTEs while updating CR3. */
3760 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3761 }
3762 }
3763 }
3764 }
3765 }
3766
3767#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3768 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3769 {
3770 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3771 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3772 {
3773 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3774 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3775 if (RT_SUCCESS(rc))
3776 { /* likely */ }
3777 else
3778 break;
3779 }
3780 }
3781#endif
3782 } while (0);
3783
3784 if (RT_SUCCESS(rc))
3785 {
3786 /* Update fExtrn. */
3787 pCtx->fExtrn &= ~fWhat;
3788
3789 /* If everything has been imported, clear the HM keeper bit. */
3790 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3791 {
3792#ifndef IN_NEM_DARWIN
3793 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3794#else
3795 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3796#endif
3797 Assert(!pCtx->fExtrn);
3798 }
3799 }
3800 }
3801#ifndef IN_NEM_DARWIN
3802 else
3803 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3804
3805 /*
3806 * Restore interrupts.
3807 */
3808 ASMSetFlags(fEFlags);
3809#endif
3810
3811 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3812
3813 if (RT_SUCCESS(rc))
3814 { /* likely */ }
3815 else
3816 return rc;
3817
3818 /*
3819 * Honor any pending CR3 updates.
3820 *
3821 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3822 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3823 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3824 *
3825 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3826 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3827 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3828 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3829 *
3830 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3831 *
3832 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3833 */
3834 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3835#ifndef IN_NEM_DARWIN
3836 && VMMRZCallRing3IsEnabled(pVCpu)
3837#endif
3838 )
3839 {
3840 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3841 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3842 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3843 }
3844
3845 return VINF_SUCCESS;
3846}
3847
3848
3849/**
3850 * Internal state fetcher, inner version where we fetch all of a_fWhat.
3851 *
3852 * @returns VBox status code.
3853 * @param pVCpu The cross context virtual CPU structure.
3854 * @param pVmcsInfo The VMCS info. object.
3855 * @param fEFlags Saved EFLAGS for restoring the interrupt flag. Ignored
3856 * in NEM/darwin context.
3857 * @tparam a_fWhat What to import, zero or more bits from
3858 * HMVMX_CPUMCTX_EXTRN_ALL.
3859 */
3860template<uint64_t const a_fWhat>
3861static int vmxHCImportGuestStateInner(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3862{
3863 Assert(a_fWhat != 0); /* No AssertCompile as the assertion probably kicks in before the compiler (clang) discards it. */
3864 AssertCompile(!(a_fWhat & ~HMVMX_CPUMCTX_EXTRN_ALL));
3865 Assert( (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == a_fWhat
3866 || (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == (a_fWhat & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
3867
 3868 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3869
3870 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3871
3872 /* RIP and RFLAGS may have been imported already by the post exit code
3873 together with the CPUMCTX_EXTRN_INHIBIT_INT/NMI state, so this part
 3874 of the code may then have nothing left to do. */
3875 if ( (a_fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3876 && pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3877 {
3878 if (a_fWhat & CPUMCTX_EXTRN_RFLAGS)
3879 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3880
3881 if (a_fWhat & CPUMCTX_EXTRN_RIP)
3882 {
3883 if (!(a_fWhat & CPUMCTX_EXTRN_CS))
3884 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3885 else
3886 vmxHCImportGuestCoreRip(pVCpu);
3887 }
3888 }
3889
3890 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3891 if (a_fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3892 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3893
3894 if (a_fWhat & CPUMCTX_EXTRN_RSP)
3895 {
3896 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pVCpu->cpum.GstCtx.rsp);
3897 AssertRC(rc);
3898 }
3899
3900 if (a_fWhat & CPUMCTX_EXTRN_SREG_MASK)
3901 {
3902#ifndef IN_NEM_DARWIN /* NEM/Darwin: HV supports only unrestricted guest execution. */
3903 PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmcsInfo->pShared;
3904 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3905#endif
3906 if (a_fWhat & CPUMCTX_EXTRN_CS)
3907 {
3908 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3909#ifndef IN_NEM_DARWIN /* NEM/Darwin: HV supports only unrestricted guest execution. */
3910 if (fRealOnV86Active)
3911 pVCpu->cpum.GstCtx.cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3912#endif
 3913 /** @todo try to get rid of this carp, it smells and is probably never ever used: */
3914 if ( !(a_fWhat & CPUMCTX_EXTRN_RIP)
3915 && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP))
3916 {
3917 vmxHCImportGuestCoreRip(pVCpu);
3918 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3919 }
3920 EMHistoryUpdatePC(pVCpu, pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, true /* fFlattened */);
3921 }
3922 if (a_fWhat & CPUMCTX_EXTRN_SS)
3923 {
3924 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3925#ifndef IN_NEM_DARWIN
3926 if (fRealOnV86Active)
3927 pVCpu->cpum.GstCtx.ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3928#endif
3929 }
3930 if (a_fWhat & CPUMCTX_EXTRN_DS)
3931 {
3932 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3933#ifndef IN_NEM_DARWIN
3934 if (fRealOnV86Active)
3935 pVCpu->cpum.GstCtx.ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3936#endif
3937 }
3938 if (a_fWhat & CPUMCTX_EXTRN_ES)
3939 {
3940 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3941#ifndef IN_NEM_DARWIN
3942 if (fRealOnV86Active)
3943 pVCpu->cpum.GstCtx.es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3944#endif
3945 }
3946 if (a_fWhat & CPUMCTX_EXTRN_FS)
3947 {
3948 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3949#ifndef IN_NEM_DARWIN
3950 if (fRealOnV86Active)
3951 pVCpu->cpum.GstCtx.fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3952#endif
3953 }
3954 if (a_fWhat & CPUMCTX_EXTRN_GS)
3955 {
3956 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3957#ifndef IN_NEM_DARWIN
3958 if (fRealOnV86Active)
3959 pVCpu->cpum.GstCtx.gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3960#endif
3961 }
3962 }
3963
3964 if (a_fWhat & CPUMCTX_EXTRN_LDTR)
3965 vmxHCImportGuestLdtr(pVCpu);
3966
3967 if (a_fWhat & CPUMCTX_EXTRN_GDTR)
3968 {
3969 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pVCpu->cpum.GstCtx.gdtr.pGdt); AssertRC(rc1);
3970 uint32_t u32Val;
3971 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc2);
3972 pVCpu->cpum.GstCtx.gdtr.cbGdt = (uint16_t)u32Val;
3973 }
3974
3975 /* Guest IDTR. */
3976 if (a_fWhat & CPUMCTX_EXTRN_IDTR)
3977 {
3978 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pVCpu->cpum.GstCtx.idtr.pIdt); AssertRC(rc1);
3979 uint32_t u32Val;
3980 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc2);
 3981 pVCpu->cpum.GstCtx.idtr.cbIdt = (uint16_t)u32Val;
3982 }
3983
3984 /* Guest TR. */
3985 if (a_fWhat & CPUMCTX_EXTRN_TR)
3986 {
3987#ifndef IN_NEM_DARWIN
3988 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3989 don't need to import that one. */
3990 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3991#endif
3992 vmxHCImportGuestTr(pVCpu);
3993 }
3994
3995 if (a_fWhat & CPUMCTX_EXTRN_DR7)
3996 {
3997#ifndef IN_NEM_DARWIN
3998 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3999#endif
4000 {
4001 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pVCpu->cpum.GstCtx.dr[7]);
4002 AssertRC(rc);
4003 }
4004 }
4005
4006 if (a_fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
4007 {
4008 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pVCpu->cpum.GstCtx.SysEnter.eip); AssertRC(rc1);
4009 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pVCpu->cpum.GstCtx.SysEnter.esp); AssertRC(rc2);
4010 uint32_t u32Val;
4011 int const rc3 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc3);
4012 pVCpu->cpum.GstCtx.SysEnter.cs = u32Val;
4013 }
4014
4015#ifndef IN_NEM_DARWIN
4016 if (a_fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
4017 {
4018 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4019 && pVM->hmr0.s.fAllow64BitGuests)
4020 pVCpu->cpum.GstCtx.msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
4021 }
4022
4023 if (a_fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
4024 {
4025 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4026 && pVM->hmr0.s.fAllow64BitGuests)
4027 {
4028 pVCpu->cpum.GstCtx.msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
4029 pVCpu->cpum.GstCtx.msrSTAR = ASMRdMsr(MSR_K6_STAR);
4030 pVCpu->cpum.GstCtx.msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
4031 }
4032 }
4033
4034 if (a_fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
4035 {
4036 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
4037 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
4038 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
4039 Assert(pMsrs);
4040 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
4041 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
4042 for (uint32_t i = 0; i < cMsrs; i++)
4043 {
4044 uint32_t const idMsr = pMsrs[i].u32Msr;
4045 switch (idMsr)
4046 {
4047 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
4048 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
4049 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
4050 default:
4051 {
4052 uint32_t idxLbrMsr;
4053 if (VM_IS_VMX_LBR(pVM))
4054 {
4055 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
4056 {
4057 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
4058 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
4059 break;
4060 }
4061 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
4062 {
 4063 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
4064 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
4065 break;
4066 }
4067 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
4068 {
4069 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
4070 break;
4071 }
4072 }
4073 pVCpu->cpum.GstCtx.fExtrn = 0;
4074 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
4075 ASMSetFlags(fEFlags);
4076 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
4077 return VERR_HM_UNEXPECTED_LD_ST_MSR;
4078 }
4079 }
4080 }
4081 }
4082#endif
4083
4084 if (a_fWhat & CPUMCTX_EXTRN_CR0)
4085 {
4086 uint64_t u64Cr0;
4087 uint64_t u64Shadow;
4088 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc1);
4089 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc2);
4090#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
4091 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4092 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
4093#else
4094 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4095 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4096 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
4097 else
4098 {
4099 /*
4100 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
4101 * the nested-guest using hardware-assisted VMX. Accordingly we need to
4102 * re-construct CR0. See @bugref{9180#c95} for details.
4103 */
4104 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
4105 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4106 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4107 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
4108 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
4109 }
4110#endif
4111#ifndef IN_NEM_DARWIN
4112 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
4113#endif
4114 CPUMSetGuestCR0(pVCpu, u64Cr0);
4115#ifndef IN_NEM_DARWIN
4116 VMMRZCallRing3Enable(pVCpu);
4117#endif
4118 }
4119
4120 if (a_fWhat & CPUMCTX_EXTRN_CR4)
4121 {
4122 uint64_t u64Cr4;
4123 uint64_t u64Shadow;
4124 int rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc1);
4125 int rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc2);
4126#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
4127 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4128 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
4129#else
4130 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4131 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4132 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
4133 else
4134 {
4135 /*
4136 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
4137 * the nested-guest using hardware-assisted VMX. Accordingly we need to
4138 * re-construct CR4. See @bugref{9180#c95} for details.
4139 */
4140 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
4141 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4142 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4143 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
4144 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
4145 }
4146#endif
4147 pVCpu->cpum.GstCtx.cr4 = u64Cr4;
4148 }
4149
4150 if (a_fWhat & CPUMCTX_EXTRN_CR3)
4151 {
4152 /* CR0.PG bit changes are always intercepted, so it's up to date. */
4153 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
4154 || ( VM_IS_VMX_NESTED_PAGING(pVM)
4155 && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)))
4156 {
4157 uint64_t u64Cr3;
4158 int const rc0 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc0);
4159 if (pVCpu->cpum.GstCtx.cr3 != u64Cr3)
4160 {
4161 pVCpu->cpum.GstCtx.cr3 = u64Cr3;
4162 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
4163 }
4164
4165 /*
4166 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
4167 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
4168 */
4169 if (CPUMIsGuestInPAEModeEx(&pVCpu->cpum.GstCtx))
4170 {
4171 X86PDPE aPaePdpes[4];
4172 int const rc1 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc1);
4173 int const rc2 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc2);
4174 int const rc3 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc3);
4175 int const rc4 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc4);
4176 if (memcmp(&aPaePdpes[0], &pVCpu->cpum.GstCtx.aPaePdpes[0], sizeof(aPaePdpes)))
4177 {
4178 memcpy(&pVCpu->cpum.GstCtx.aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
4179 /* PGM now updates PAE PDPTEs while updating CR3. */
4180 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
4181 }
4182 }
4183 }
4184 }
4185
4186#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4187 if (a_fWhat & CPUMCTX_EXTRN_HWVIRT)
4188 {
4189 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4190 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4191 {
4192 Assert(CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx));
4193 int const rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
4194 AssertRCReturn(rc, rc);
4195 }
4196 }
4197#endif
4198
4199 /* Update fExtrn. */
4200 pVCpu->cpum.GstCtx.fExtrn &= ~a_fWhat;
4201
4202 /* If everything has been imported, clear the HM keeper bit. */
4203 if (!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
4204 {
4205#ifndef IN_NEM_DARWIN
4206 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
4207#else
4208 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
4209#endif
4210 Assert(!pVCpu->cpum.GstCtx.fExtrn);
4211 }
4212
4213 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
4214
4215 /*
4216 * Honor any pending CR3 updates.
4217 *
4218 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
4219 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
4220 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
4221 *
4222 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
4223 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
4224 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
4225 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
4226 *
4227 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
4228 *
4229 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
4230 */
4231#ifndef IN_NEM_DARWIN
4232 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4233 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu))
4234 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu) )
4235 return VINF_SUCCESS;
4236 ASMSetFlags(fEFlags);
4237#else
4238 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4239 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
4240 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) )
4241 return VINF_SUCCESS;
4242 RT_NOREF_PV(fEFlags);
4243#endif
4244
4245 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
4246 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
4247 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
4248 return VINF_SUCCESS;
4249}
4250
4251
4252/**
4253 * Internal state fetcher.
4254 *
4255 * @returns VBox status code.
4256 * @param pVCpu The cross context virtual CPU structure.
4257 * @param pVmcsInfo The VMCS info. object.
4258 * @param pszCaller For logging.
4259 * @tparam a_fWhat What needs to be imported, CPUMCTX_EXTRN_XXX.
4260 * @tparam a_fDoneLocal What's ASSUMED to have been retrieved locally
4261 * already. This is ORed together with @a a_fWhat when
4262 * calculating what needs fetching (just for safety).
 4263 * @tparam a_fDonePostExit What's ASSUMED to have been retrieved by
4264 * hmR0VmxPostRunGuest()/nemR3DarwinHandleExitCommon()
4265 * already. This is ORed together with @a a_fWhat when
4266 * calculating what needs fetching (just for safety).
4267 */
4268template<uint64_t const a_fWhat,
4269 uint64_t const a_fDoneLocal = 0,
4270 uint64_t const a_fDonePostExit = 0
4271#ifndef IN_NEM_DARWIN
4272 | CPUMCTX_EXTRN_INHIBIT_INT
4273 | CPUMCTX_EXTRN_INHIBIT_NMI
4274# if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
4275 | HMVMX_CPUMCTX_EXTRN_ALL
4276# elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
4277 | CPUMCTX_EXTRN_RFLAGS
4278# endif
4279#else /* IN_NEM_DARWIN */
4280 | CPUMCTX_EXTRN_ALL /** @todo optimize */
4281#endif /* IN_NEM_DARWIN */
4282>
4283DECLINLINE(int) vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, const char *pszCaller)
4284{
4285 RT_NOREF_PV(pszCaller);
4286 if ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL)
4287 {
4288#ifndef IN_NEM_DARWIN
4289 /*
4290 * We disable interrupts to make the updating of the state and in particular
 4291 * the fExtrn modification atomic wrt preemption hooks.
4292 */
4293 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
4294#else
4295 RTCCUINTREG const fEFlags = 0;
4296#endif
4297
4298 /*
4299 * We combine all three parameters and take the (probably) inlined optimized
4300 * code path for the new things specified in a_fWhat.
4301 *
4302 * As a tweak to deal with exits that have INHIBIT_INT/NMI active, causing
4303 * vmxHCImportGuestIntrState to automatically fetch both RIP & RFLAGS, we
4304 * also take the streamlined path when both of these are cleared in fExtrn
4305 * already. vmxHCImportGuestStateInner checks fExtrn before fetching. This
4306 * helps with MWAIT and HLT exits that always inhibit IRQs on many platforms.
4307 */
4308 uint64_t const fWhatToDo = pVCpu->cpum.GstCtx.fExtrn
4309 & ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL);
4310 if (RT_LIKELY( ( fWhatToDo == (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit))
4311 || fWhatToDo == ( a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)
4312 & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)) /* fetch with INHIBIT_INT/NMI */))
4313 && (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)) != 0 /* just in case */)
4314 {
4315 int const rc = vmxHCImportGuestStateInner< a_fWhat
4316 & HMVMX_CPUMCTX_EXTRN_ALL
4317 & ~(a_fDoneLocal | a_fDonePostExit)>(pVCpu, pVmcsInfo, fEFlags);
4318#ifndef IN_NEM_DARWIN
4319 ASMSetFlags(fEFlags);
4320#endif
4321 return rc;
4322 }
4323
4324#ifndef IN_NEM_DARWIN
4325 ASMSetFlags(fEFlags);
4326#endif
4327
4328 /*
4329 * We shouldn't normally get here, but it may happen when executing
4330 * in the debug run-loops. Typically, everything should already have
4331 * been fetched then. Otherwise call the fallback state import function.
4332 */
4333 if (fWhatToDo == 0)
4334 { /* hope the cause was the debug loop or something similar */ }
4335 else
4336 {
4337 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestStateFallback);
4338 Log11Func(("a_fWhat=%#RX64/%#RX64/%#RX64 fExtrn=%#RX64 => %#RX64 - Taking inefficient code path from %s!\n",
4339 a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL, a_fDoneLocal & HMVMX_CPUMCTX_EXTRN_ALL,
4340 a_fDonePostExit & HMVMX_CPUMCTX_EXTRN_ALL, pVCpu->cpum.GstCtx.fExtrn, fWhatToDo, pszCaller));
4341 return vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, a_fWhat | a_fDoneLocal | a_fDonePostExit);
4342 }
4343 }
4344 return VINF_SUCCESS;
4345}
4346
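/*
 * Illustrative usage sketch: a VM-exit handler typically pulls in just the state it needs
 * through the first template parameter, along these lines (the exact mask shown here is
 * hypothetical):
 *
 *     int const rc = vmxHCImportGuestState<  CPUMCTX_EXTRN_RIP
 *                                          | CPUMCTX_EXTRN_RFLAGS
 *                                          | CPUMCTX_EXTRN_CS>(pVCpu, pVmcsInfo, __FUNCTION__);
 *     AssertRCReturn(rc, rc);
 */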
4347
4348/**
4349 * Check per-VM and per-VCPU force flag actions that require us to go back to
4350 * ring-3 for one reason or another.
4351 *
4352 * @returns Strict VBox status code (i.e. informational status codes too)
4353 * @retval VINF_SUCCESS if we don't have any actions that require going back to
4354 * ring-3.
4355 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
4356 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
4357 * interrupts)
4358 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
4359 * all EMTs to be in ring-3.
 4360 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
4361 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
4362 * to the EM loop.
4363 *
4364 * @param pVCpu The cross context virtual CPU structure.
 4365 * @param fIsNestedGuest Flag whether this is for a pending nested guest event.
4366 * @param fStepping Whether we are single-stepping the guest using the
4367 * hypervisor debugger.
4368 *
4369 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
4370 * is no longer in VMX non-root mode.
4371 */
4372static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
4373{
4374#ifndef IN_NEM_DARWIN
4375 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4376#endif
4377
4378 /*
4379 * Update pending interrupts into the APIC's IRR.
4380 */
4381 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4382 APICUpdatePendingInterrupts(pVCpu);
4383
4384 /*
4385 * Anything pending? Should be more likely than not if we're doing a good job.
4386 */
4387 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4388 if ( !fStepping
4389 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
4390 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
4391 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
4392 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4393 return VINF_SUCCESS;
4394
 4395 /* Pending PGM CR3 sync. */
 4396 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
4397 {
4398 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4399 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
4400 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
4401 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
4402 if (rcStrict != VINF_SUCCESS)
4403 {
4404 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
4405 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
4406 return rcStrict;
4407 }
4408 }
4409
4410 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
4411 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
4412 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4413 {
4414 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
4415 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
4416 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
4417 rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
4418 return rc;
4419 }
4420
4421 /* Pending VM request packets, such as hardware interrupts. */
4422 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
4423 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
4424 {
4425 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
4426 Log4Func(("Pending VM request forcing us back to ring-3\n"));
4427 return VINF_EM_PENDING_REQUEST;
4428 }
4429
4430 /* Pending PGM pool flushes. */
4431 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
4432 {
4433 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
4434 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
4435 return VINF_PGM_POOL_FLUSH_PENDING;
4436 }
4437
4438 /* Pending DMA requests. */
4439 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4440 {
4441 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
4442 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4443 return VINF_EM_RAW_TO_R3;
4444 }
4445
4446#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4447 /*
4448 * Pending nested-guest events.
4449 *
4450 * Please note that the priority of these events is specified and important.
4451 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4452 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
4453 */
4454 if (fIsNestedGuest)
4455 {
4456 /* Pending nested-guest APIC-write. */
4457 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4458 {
4459 Log4Func(("Pending nested-guest APIC-write\n"));
4460 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
4461 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4462 return rcStrict;
4463 }
4464
4465 /* Pending nested-guest monitor-trap flag (MTF). */
4466 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
4467 {
4468 Log4Func(("Pending nested-guest MTF\n"));
4469 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
4470 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4471 return rcStrict;
4472 }
4473
4474 /* Pending nested-guest VMX-preemption timer expired. */
4475 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
4476 {
4477 Log4Func(("Pending nested-guest preempt timer\n"));
4478 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
4479 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4480 return rcStrict;
4481 }
4482 }
4483#else
4484 NOREF(fIsNestedGuest);
4485#endif
4486
4487 return VINF_SUCCESS;
4488}
4489
4490
4491/**
4492 * Converts any TRPM trap into a pending HM event. This is typically used when
4493 * entering from ring-3 (not longjmp returns).
4494 *
4495 * @param pVCpu The cross context virtual CPU structure.
4496 */
4497static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
4498{
4499 Assert(TRPMHasTrap(pVCpu));
4500 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4501
4502 uint8_t uVector;
4503 TRPMEVENT enmTrpmEvent;
4504 uint32_t uErrCode;
4505 RTGCUINTPTR GCPtrFaultAddress;
4506 uint8_t cbInstr;
4507 bool fIcebp;
4508
4509 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4510 AssertRC(rc);
4511
4512 uint32_t u32IntInfo;
4513 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4514 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
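     /* Note: the vector/type/valid layout built here (bits 7:0 vector, bits 10:8 type, bit 31 valid)
        is shared by the IDT-vectoring information and VM-entry interruption-information formats,
        which is why the VMX_IDT_VECTORING_INFO_* macros can be used for what vmxHCInjectEventVmcs()
        later writes as VM-entry interruption info. */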
4515
4516 rc = TRPMResetTrap(pVCpu);
4517 AssertRC(rc);
4518 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4519 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4520
4521 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4522}
4523
4524
4525/**
4526 * Converts the pending HM event into a TRPM trap.
4527 *
4528 * @param pVCpu The cross context virtual CPU structure.
4529 */
4530static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4531{
4532 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4533
4534 /* If a trap was already pending, we did something wrong! */
4535 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4536
4537 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4538 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4539 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4540
4541 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4542
4543 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4544 AssertRC(rc);
4545
4546 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4547 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4548
4549 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4550 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4551 else
4552 {
4553 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4554 switch (uVectorType)
4555 {
4556 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4557 TRPMSetTrapDueToIcebp(pVCpu);
4558 RT_FALL_THRU();
4559 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4560 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4561 {
4562 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4563 || ( uVector == X86_XCPT_BP /* INT3 */
4564 || uVector == X86_XCPT_OF /* INTO */
4565 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4566 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4567 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4568 break;
4569 }
4570 }
4571 }
4572
4573 /* We're now done converting the pending event. */
4574 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4575}
4576
4577
4578/**
4579 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4580 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4581 *
4582 * @param pVCpu The cross context virtual CPU structure.
4583 * @param pVmcsInfo The VMCS info. object.
4584 */
4585static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4586{
4587 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4588 {
4589 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4590 {
4591 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4592 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4593 AssertRC(rc);
4594 }
4595 } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4596}
4597
4598
4599/**
4600 * Clears the interrupt-window exiting control in the VMCS.
4601 *
4602 * @param pVCpu The cross context virtual CPU structure.
4603 * @param pVmcsInfo The VMCS info. object.
4604 */
4605DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4606{
4607 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4608 {
4609 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4610 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4611 AssertRC(rc);
4612 }
4613}
4614
4615
4616/**
4617 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4618 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4619 *
4620 * @param pVCpu The cross context virtual CPU structure.
4621 * @param pVmcsInfo The VMCS info. object.
4622 */
4623static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4624{
4625 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4626 {
4627 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4628 {
4629 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4630 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4631 AssertRC(rc);
4632 Log4Func(("Setup NMI-window exiting\n"));
4633 }
4634 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4635}
4636
4637
4638/**
4639 * Clears the NMI-window exiting control in the VMCS.
4640 *
4641 * @param pVCpu The cross context virtual CPU structure.
4642 * @param pVmcsInfo The VMCS info. object.
4643 */
4644DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4645{
4646 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4647 {
4648 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4649 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4650 AssertRC(rc);
4651 }
4652}
4653
4654
4655/**
4656 * Injects an event into the guest upon VM-entry by updating the relevant fields
4657 * in the VM-entry area in the VMCS.
4658 *
4659 * @returns Strict VBox status code (i.e. informational status codes too).
4660 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4661 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4662 *
4663 * @param pVCpu The cross context virtual CPU structure.
4664 * @param pVmcsInfo The VMCS info object.
4665 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
4666 * @param pEvent The event being injected.
4667 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4668 * will be updated if necessary. This cannot be NULL.
4669 * @param fStepping Whether we're single-stepping guest execution and should
4670 * return VINF_EM_DBG_STEPPED if the event is injected
4671 * directly (registers modified by us, not by hardware on
4672 * VM-entry).
4673 */
4674static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4675 bool fStepping, uint32_t *pfIntrState)
4676{
4677 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4678 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4679 Assert(pfIntrState);
4680
4681#ifdef IN_NEM_DARWIN
4682 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4683#endif
4684
4685 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4686 uint32_t u32IntInfo = pEvent->u64IntInfo;
4687 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4688 uint32_t const cbInstr = pEvent->cbInstr;
4689 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4690 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4691 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4692
4693#ifdef VBOX_STRICT
4694 /*
4695 * Validate the error-code-valid bit for hardware exceptions.
4696 * No error codes for exceptions in real-mode.
4697 *
4698 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4699 */
4700 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4701 && !CPUMIsGuestInRealModeEx(pCtx))
4702 {
4703 switch (uVector)
4704 {
4705 case X86_XCPT_PF:
4706 case X86_XCPT_DF:
4707 case X86_XCPT_TS:
4708 case X86_XCPT_NP:
4709 case X86_XCPT_SS:
4710 case X86_XCPT_GP:
4711 case X86_XCPT_AC:
4712 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4713 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4714 RT_FALL_THRU();
4715 default:
4716 break;
4717 }
4718 }
4719
4720 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4721 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4722 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4723#endif
4724
4725 RT_NOREF(uVector);
4726 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4727 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4728 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4729 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4730 {
4731 Assert(uVector <= X86_XCPT_LAST);
4732 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4733 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4734 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4735 }
4736 else
4737 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4738
4739 /*
4740 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4741 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4742 * interrupt handler in the (real-mode) guest.
4743 *
4744 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4745 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4746 */
4747 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4748 {
4749#ifndef IN_NEM_DARWIN
4750 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4751#endif
4752 {
4753 /*
4754 * For CPUs with unrestricted guest execution enabled and with the guest
4755 * in real-mode, we must not set the deliver-error-code bit.
4756 *
4757 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4758 */
4759 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4760 }
4761#ifndef IN_NEM_DARWIN
4762 else
4763 {
4764 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4765 Assert(PDMVmmDevHeapIsEnabled(pVM));
4766 Assert(pVM->hm.s.vmx.pRealModeTSS);
4767 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4768
4769 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4770 int rc2 = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4771 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4772 AssertRCReturn(rc2, rc2);
4773
4774 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4775 size_t const cbIdtEntry = sizeof(X86IDTR16);
4776 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4777 {
4778 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4779 if (uVector == X86_XCPT_DF)
4780 return VINF_EM_RESET;
4781
4782 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4783 No error codes for exceptions in real-mode. */
4784 if (uVector == X86_XCPT_GP)
4785 {
4786 static HMEVENT const s_EventXcptDf
4787 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4788 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4789 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4790 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4791 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
4792 }
4793
4794 /*
4795 * If we're injecting an event with no valid IDT entry, inject a #GP.
4796 * No error codes for exceptions in real-mode.
4797 *
4798 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4799 */
4800 static HMEVENT const s_EventXcptGp
4801 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4802 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4803 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4804 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4805 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
4806 }
4807
4808 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4809 uint16_t uGuestIp = pCtx->ip;
4810 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4811 {
4812 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4813 /* #BP and #OF are both benign traps; we need to resume at the next instruction. */
4814 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4815 }
4816 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4817 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4818
4819 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4820 X86IDTR16 IdtEntry;
4821 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4822 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4823 AssertRCReturn(rc2, rc2);
4824
4825 /* Construct the stack frame for the interrupt/exception handler. */
4826 VBOXSTRICTRC rcStrict;
4827 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
4828 if (rcStrict == VINF_SUCCESS)
4829 {
4830 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4831 if (rcStrict == VINF_SUCCESS)
4832 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4833 }
4834
4835 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4836 if (rcStrict == VINF_SUCCESS)
4837 {
4838 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4839 pCtx->rip = IdtEntry.offSel;
4840 pCtx->cs.Sel = IdtEntry.uSel;
4841 pCtx->cs.ValidSel = IdtEntry.uSel;
4842 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
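     /* Note: cbIdtEntry is sizeof(X86IDTR16) == 4, so the shift above is equivalent to
        IdtEntry.uSel * 16, i.e. the real-mode segment base for the selector. */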
4843 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4844 && uVector == X86_XCPT_PF)
4845 pCtx->cr2 = GCPtrFault;
4846
4847 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4848 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4849 | HM_CHANGED_GUEST_RSP);
4850
4851 /*
4852 * If we delivered a hardware exception (other than an NMI) and if there was
4853 * block-by-STI in effect, we should clear it.
4854 */
4855 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4856 {
4857 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4858 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4859 Log4Func(("Clearing inhibition due to STI\n"));
4860 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4861 }
4862
4863 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4864 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4865
4866 /*
4867 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4868 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4869 */
4870 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4871
4872 /*
4873 * If we eventually support nested-guest execution without unrestricted guest execution,
4874 * we should set fInterceptEvents here.
4875 */
4876 Assert(!fIsNestedGuest);
4877
4878 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4879 if (fStepping)
4880 rcStrict = VINF_EM_DBG_STEPPED;
4881 }
4882 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4883 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4884 return rcStrict;
4885 }
4886#else
4887 RT_NOREF(pVmcsInfo);
4888#endif
4889 }
4890
4891 /*
4892 * Validate.
4893 */
4894 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4895 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4896
4897 /*
4898 * Inject the event into the VMCS.
4899 */
4900 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4901 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4902 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4903 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4904 AssertRC(rc);
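     /* Note: OR-ing the status codes above relies on VINF_SUCCESS being 0; any VMCS write
        failure leaves rc non-zero and is caught by the AssertRC above. */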
4905
4906 /*
4907 * Update guest CR2 if this is a page-fault.
4908 */
4909 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4910 pCtx->cr2 = GCPtrFault;
4911
4912 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4913 return VINF_SUCCESS;
4914}
4915
4916
4917/**
4918 * Evaluates the event to be delivered to the guest and sets it as the pending
4919 * event.
4920 *
4921 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4922 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4923 * NOT restore these force-flags.
4924 *
4925 * @returns Strict VBox status code (i.e. informational status codes too).
4926 * @param pVCpu The cross context virtual CPU structure.
4927 * @param pVmcsInfo The VMCS information structure.
4928 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
4929 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4930 */
4931static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4932{
4933 Assert(pfIntrState);
4934 Assert(!TRPMHasTrap(pVCpu));
4935
4936 /*
4937 * Compute/update guest-interruptibility state related FFs.
4938 * The FFs will be used below while evaluating events to be injected.
4939 */
4940 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4941
4942 /*
4943 * Evaluate if a new event needs to be injected.
4944 * An event that's already pending has already performed all necessary checks.
4945 */
4946 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4947 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
4948 {
4949 /** @todo SMI. SMIs take priority over NMIs. */
4950
4951 /*
4952 * NMIs.
4953 * NMIs take priority over external interrupts.
4954 */
4955#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4956 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4957#endif
4958 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4959 {
4960 /*
4961 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4962 *
4963 * For a nested-guest, the FF always indicates the outer guest's ability to
4964 * receive an NMI while the guest-interruptibility state bit depends on whether
4965 * the nested-hypervisor is using virtual-NMIs.
4966 */
4967 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
4968 {
4969#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4970 if ( fIsNestedGuest
4971 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4972 return IEMExecVmxVmexitXcptNmi(pVCpu);
4973#endif
4974 vmxHCSetPendingXcptNmi(pVCpu);
4975 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4976 Log4Func(("NMI pending injection\n"));
4977
4978 /* We've injected the NMI, bail. */
4979 return VINF_SUCCESS;
4980 }
4981 else if (!fIsNestedGuest)
4982 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4983 }
4984
4985 /*
4986 * External interrupts (PIC/APIC).
4987 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4988 * We cannot re-request the interrupt from the controller again.
4989 */
4990 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4991 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4992 {
4993 Assert(!DBGFIsStepping(pVCpu));
4994 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4995 AssertRC(rc);
4996
4997 /*
4998 * We must not check EFLAGS directly when executing a nested-guest, use
4999 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
5000 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
5001 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
5002 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
5003 *
5004 * See Intel spec. 25.4.1 "Event Blocking".
5005 */
5006 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
5007 {
5008#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5009 if ( fIsNestedGuest
5010 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
5011 {
5012 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
5013 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5014 return rcStrict;
5015 }
5016#endif
5017 uint8_t u8Interrupt;
5018 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
5019 if (RT_SUCCESS(rc))
5020 {
5021#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5022 if ( fIsNestedGuest
5023 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
5024 {
5025 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
5026 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5027 return rcStrict;
5028 }
5029#endif
5030 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
5031 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
5032 }
5033 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
5034 {
5035 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
5036
5037 if ( !fIsNestedGuest
5038 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
5039 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
5040 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
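     /* Note: u8Interrupt >> 4 is the interrupt's priority class; using it as the TPR threshold
        makes the CPU raise a TPR-below-threshold VM-exit once the guest lowers its TPR far
        enough for this interrupt to become deliverable. */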
5041
5042 /*
5043 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
5044 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
5045 * need to re-set this force-flag here.
5046 */
5047 }
5048 else
5049 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
5050
5051 /* We've injected the interrupt or taken necessary action, bail. */
5052 return VINF_SUCCESS;
5053 }
5054 if (!fIsNestedGuest)
5055 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
5056 }
5057 }
5058 else if (!fIsNestedGuest)
5059 {
5060 /*
5061 * An event is being injected or we are in an interrupt shadow. Check if another event is
5062 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
5063 * the pending event.
5064 */
5065 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
5066 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
5067 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
5068 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5069 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
5070 }
5071 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
5072
5073 return VINF_SUCCESS;
5074}
5075
5076
5077/**
5078 * Injects any pending events into the guest if the guest is in a state to
5079 * receive them.
5080 *
5081 * @returns Strict VBox status code (i.e. informational status codes too).
5082 * @param pVCpu The cross context virtual CPU structure.
5083 * @param pVmcsInfo The VMCS information structure.
5084 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
5085 * @param fIntrState The VT-x guest-interruptibility state.
5086 * @param fStepping Whether we are single-stepping the guest using the
5087 * hypervisor debugger and should return
5088 * VINF_EM_DBG_STEPPED if the event was dispatched
5089 * directly.
5090 */
5091static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
5092 uint32_t fIntrState, bool fStepping)
5093{
5094 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
5095#ifndef IN_NEM_DARWIN
5096 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5097#endif
5098
5099#ifdef VBOX_STRICT
5100 /*
5101 * Verify guest-interruptibility state.
5102 *
5103 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
5104 * since injecting an event may modify the interruptibility state and we must thus always
5105 * use fIntrState.
5106 */
5107 {
5108 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
5109 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
5110 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
5111 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5112 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
5113 Assert(!TRPMHasTrap(pVCpu));
5114 NOREF(fBlockMovSS); NOREF(fBlockSti);
5115 }
5116#endif
5117
5118 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5119 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5120 {
5121 /*
5122 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
5123 * pending even while injecting an event and in this case, we want a VM-exit as soon as
5124 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
5125 *
5126 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5127 */
5128 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
5129#ifdef VBOX_STRICT
5130 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5131 {
5132 Assert(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
5133 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5134 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5135 }
5136 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5137 {
5138 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
5139 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5140 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5141 }
5142#endif
5143 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5144 uIntType));
5145
5146 /*
5147 * Inject the event and get any changes to the guest-interruptibility state.
5148 *
5149 * The guest-interruptibility state may need to be updated if we inject the event
5150 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
5151 */
5152 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
5153 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
5154
5155 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5156 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
5157 else
5158 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
5159 }
5160
5161 /*
5162 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
5163 * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
5164 */
5165 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5166 && !fIsNestedGuest)
5167 {
5168 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
5169
5170 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5171 {
5172 /*
5173 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
5174 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
5175 */
5176 Assert(!DBGFIsStepping(pVCpu));
5177 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_TF);
5178 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
5179 AssertRC(rc);
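     /* Note: BS is bit 14 of the pending debug-exceptions field and corresponds to the single-step
        #DB that EFLAGS.TF would otherwise raise after the current instruction. */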
5180 }
5181 else
5182 {
5183 /*
5184 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
5185 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
5186 * we take care of this case in vmxHCExportSharedDebugState and also the case if
5187 * we use MTF, so just make sure it's called before executing guest-code.
5188 */
5189 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
5190 }
5191 }
5192 /* else: for nested-guests, this is currently handled while merging VMCS controls. */
5193
5194 /*
5195 * Finally, update the guest-interruptibility state.
5196 *
5197 * This is required for the real-on-v86 software interrupt injection, for
5198 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
5199 */
5200 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
5201 AssertRC(rc);
5202
5203 /*
5204 * There's no need to clear the VM-entry interruption-information field here if we're not
5205 * injecting anything. VT-x clears the valid bit on every VM-exit.
5206 *
5207 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
5208 */
5209
5210 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
5211 return rcStrict;
5212}
5213
5214
5215/**
5216 * Tries to determine what part of the guest-state VT-x has deemed invalid
5217 * and updates the error record fields accordingly.
5218 *
5219 * @returns VMX_IGS_* error codes.
5220 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
5221 * wrong with the guest state.
5222 *
5223 * @param pVCpu The cross context virtual CPU structure.
5224 * @param pVmcsInfo The VMCS info. object.
5225 *
5226 * @remarks This function assumes our cache of the VMCS controls
5227 * is valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
5228 */
5229static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
5230{
5231#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
5232#define HMVMX_CHECK_BREAK(expr, err) do { \
5233 if (!(expr)) { uError = (err); break; } \
5234 } while (0)
5235
5236 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5237 uint32_t uError = VMX_IGS_ERROR;
5238 uint32_t u32IntrState = 0;
5239#ifndef IN_NEM_DARWIN
5240 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5241 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
5242#else
5243 bool const fUnrestrictedGuest = true;
5244#endif
5245 do
5246 {
5247 int rc;
5248
5249 /*
5250 * Guest-interruptibility state.
5251 *
5252 * Read this first so that a check that fails prior to those that actually
5253 * require the guest-interruptibility state still reflects the correct
5254 * VMCS value and avoids causing further confusion.
5255 */
5256 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
5257 AssertRC(rc);
5258
5259 uint32_t u32Val;
5260 uint64_t u64Val;
5261
5262 /*
5263 * CR0.
5264 */
5265 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5266 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
5267 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
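     /* Note: a CR0 bit that is 1 in the fixed-0 MSR must be 1, and a bit that is 0 in the fixed-1
        MSR must be 0; hence fSetCr0 = bits that must be set and fZapCr0 = bits that may be set.
        The CR4 fixed MSRs below follow the same scheme. */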
5268 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
5269 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
5270 if (fUnrestrictedGuest)
5271 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
5272
5273 uint64_t u64GuestCr0;
5274 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
5275 AssertRC(rc);
5276 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
5277 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
5278 if ( !fUnrestrictedGuest
5279 && (u64GuestCr0 & X86_CR0_PG)
5280 && !(u64GuestCr0 & X86_CR0_PE))
5281 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
5282
5283 /*
5284 * CR4.
5285 */
5286 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5287 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
5288 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
5289
5290 uint64_t u64GuestCr4;
5291 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
5292 AssertRC(rc);
5293 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
5294 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
5295
5296 /*
5297 * IA32_DEBUGCTL MSR.
5298 */
5299 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
5300 AssertRC(rc);
5301 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5302 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
5303 {
5304 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
5305 }
5306 uint64_t u64DebugCtlMsr = u64Val;
5307
5308#ifdef VBOX_STRICT
5309 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
5310 AssertRC(rc);
5311 Assert(u32Val == pVmcsInfo->u32EntryCtls);
5312#endif
5313 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5314
5315 /*
5316 * RIP and RFLAGS.
5317 */
5318 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
5319 AssertRC(rc);
5320 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
5321 if ( !fLongModeGuest
5322 || !pCtx->cs.Attr.n.u1Long)
5323 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
5324 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
5325 * must be identical if the "IA-32e mode guest" VM-entry
5326 * control is 1 and CS.L is 1. No check applies if the
5327 * CPU supports 64 linear-address bits. */
5328
5329 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
5330 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
5331 AssertRC(rc);
5332 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
5333 VMX_IGS_RFLAGS_RESERVED);
5334 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
5335 uint32_t const u32Eflags = u64Val;
5336
5337 if ( fLongModeGuest
5338 || ( fUnrestrictedGuest
5339 && !(u64GuestCr0 & X86_CR0_PE)))
5340 {
5341 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
5342 }
5343
5344 uint32_t u32EntryInfo;
5345 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
5346 AssertRC(rc);
5347 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5348 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
5349
5350 /*
5351 * 64-bit checks.
5352 */
5353 if (fLongModeGuest)
5354 {
5355 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
5356 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
5357 }
5358
5359 if ( !fLongModeGuest
5360 && (u64GuestCr4 & X86_CR4_PCIDE))
5361 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
5362
5363 /** @todo CR3 field must be such that bits 63:52 and bits in the range
5364 * 51:32 beyond the processor's physical-address width are 0. */
5365
5366 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5367 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
5368 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
5369
5370#ifndef IN_NEM_DARWIN
5371 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
5372 AssertRC(rc);
5373 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
5374
5375 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
5376 AssertRC(rc);
5377 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
5378#endif
5379
5380 /*
5381 * PERF_GLOBAL MSR.
5382 */
5383 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
5384 {
5385 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
5386 AssertRC(rc);
5387 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
5388 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
5389 }
5390
5391 /*
5392 * PAT MSR.
5393 */
5394 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5395 {
5396 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
5397 AssertRC(rc);
5398 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xf8f8f8f8f8f8f8f8)), VMX_IGS_PAT_MSR_RESERVED); /* Bits 7:3 of each PAT entry MBZ. */
5399 for (unsigned i = 0; i < 8; i++)
5400 {
5401 uint8_t u8Val = (u64Val & 0xff);
5402 if ( u8Val != 0 /* UC */
5403 && u8Val != 1 /* WC */
5404 && u8Val != 4 /* WT */
5405 && u8Val != 5 /* WP */
5406 && u8Val != 6 /* WB */
5407 && u8Val != 7 /* UC- */)
5408 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
5409 u64Val >>= 8;
5410 }
5411 }
5412
5413 /*
5414 * EFER MSR.
5415 */
5416 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5417 {
5418 Assert(g_fHmVmxSupportsVmcsEfer);
5419 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
5420 AssertRC(rc);
5421 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
5422 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
5423 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
5424 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
5425 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
5426 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
5427 * iemVmxVmentryCheckGuestState(). */
5428 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5429 || !(u64GuestCr0 & X86_CR0_PG)
5430 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
5431 VMX_IGS_EFER_LMA_LME_MISMATCH);
5432 }
5433
5434 /*
5435 * Segment registers.
5436 */
5437 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5438 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
5439 if (!(u32Eflags & X86_EFL_VM))
5440 {
5441 /* CS */
5442 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
5443 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
5444 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
5445 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5446 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5447 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
5448 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5449 /* CS cannot be loaded with NULL in protected mode. */
5450 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
5451 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
5452 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5453 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
5454 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5455 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
5456 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
5457 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
5458 else
5459 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
5460
5461 /* SS */
5462 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5463 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
5464 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
5465 if ( !(pCtx->cr0 & X86_CR0_PE)
5466 || pCtx->cs.Attr.n.u4Type == 3)
5467 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
5468
5469 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5470 {
5471 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
5472 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
5473 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
5474 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
5475 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5476 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5477 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
5478 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5479 }
5480
5481 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
5482 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5483 {
5484 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
5485 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
5486 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5487 || pCtx->ds.Attr.n.u4Type > 11
5488 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5489 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
5490 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
5491 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5492 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5493 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
5494 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5495 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5496 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
5497 }
5498 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5499 {
5500 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
5501 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
5502 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5503 || pCtx->es.Attr.n.u4Type > 11
5504 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5505 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5506 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5507 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5508 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5509 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5510 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5511 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5512 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5513 }
5514 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5515 {
5516 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5517 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5518 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5519 || pCtx->fs.Attr.n.u4Type > 11
5520 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5521 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5522 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5523 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5524 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5525 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5526 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5527 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5528 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5529 }
5530 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5531 {
5532 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5533 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5534 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5535 || pCtx->gs.Attr.n.u4Type > 11
5536 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5537 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5538 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5539 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5540 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5541 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5542 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5543 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5544 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5545 }
5546 /* 64-bit capable CPUs. */
5547 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5548 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5549 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5550 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5551 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5552 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5553 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5554 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5555 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5556 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5557 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5558 }
5559 else
5560 {
5561 /* V86 mode checks. */
5562 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5563 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5564 {
5565 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5566 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5567 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5568 }
5569 else
5570 {
5571 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5572 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5573 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5574 }
5575
5576 /* CS */
5577 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5578 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5579 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5580 /* SS */
5581 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5582 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5583 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5584 /* DS */
5585 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5586 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5587 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5588 /* ES */
5589 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5590 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5591 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5592 /* FS */
5593 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5594 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5595 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5596 /* GS */
5597 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5598 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5599 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5600 /* 64-bit capable CPUs. */
5601 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5602 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5603 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5604 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5605 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5606 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5607 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5608 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5609 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5610 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5611 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5612 }
5613
5614 /*
5615 * TR.
5616 */
5617 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5618 /* 64-bit capable CPUs. */
5619 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5620 if (fLongModeGuest)
5621 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5622 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5623 else
5624 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5625 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5626 VMX_IGS_TR_ATTR_TYPE_INVALID);
5627 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5628 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5629 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5630 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5631 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5632 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5633 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5634 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5635
5636 /*
5637 * GDTR and IDTR (64-bit capable checks).
5638 */
5639 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5640 AssertRC(rc);
5641 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5642
5643 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5644 AssertRC(rc);
5645 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5646
5647 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5648 AssertRC(rc);
5649 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5650
5651 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5652 AssertRC(rc);
5653 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5654
5655 /*
5656 * Guest Non-Register State.
5657 */
5658 /* Activity State. */
5659 uint32_t u32ActivityState;
5660 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5661 AssertRC(rc);
5662 HMVMX_CHECK_BREAK( !u32ActivityState
5663 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5664 VMX_IGS_ACTIVITY_STATE_INVALID);
5665 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5666 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5667
5668 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5669 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5670 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5671
5672 /** @todo Activity state and injecting interrupts. Left as a todo since we
5673 * currently don't use activity states but ACTIVE. */
5674
5675 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5676 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5677
5678 /* Guest interruptibility-state. */
5679 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5680 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5681 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5682 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5683 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5684 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5685 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5686 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5687 {
5688 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5689 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5690 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5691 }
5692 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5693 {
5694 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5695 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5696 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5697 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5698 }
5699 /** @todo Assumes the processor is not in SMM. */
5700 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5701 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5702 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5703 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5704 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5705 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5706 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5707 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5708
5709 /* Pending debug exceptions. */
5710 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5711 AssertRC(rc);
5712 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5713 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5714 u32Val = u64Val; /* For pending debug exceptions checks below. */
5715
5716 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5717 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5718 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5719 {
5720 if ( (u32Eflags & X86_EFL_TF)
5721 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5722 {
5723 /* Bit 14 is PendingDebug.BS. */
5724 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5725 }
5726 if ( !(u32Eflags & X86_EFL_TF)
5727 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5728 {
5729 /* Bit 14 is PendingDebug.BS. */
5730 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5731 }
5732 }
5733
5734#ifndef IN_NEM_DARWIN
5735 /* VMCS link pointer. */
5736 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5737 AssertRC(rc);
5738 if (u64Val != UINT64_C(0xffffffffffffffff))
5739 {
5740 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5741 /** @todo Bits beyond the processor's physical-address width MBZ. */
5742 /** @todo SMM checks. */
5743 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5744 Assert(pVmcsInfo->pvShadowVmcs);
5745 VMXVMCSREVID VmcsRevId;
5746 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5747 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5748 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5749 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5750 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5751 }
5752
5753 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5754 * not using nested paging? */
5755 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5756 && !fLongModeGuest
5757 && CPUMIsGuestInPAEModeEx(pCtx))
5758 {
5759 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5760 AssertRC(rc);
5761 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5762
5763 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5764 AssertRC(rc);
5765 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5766
5767 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5768 AssertRC(rc);
5769 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5770
5771 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5772 AssertRC(rc);
5773 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5774 }
5775#endif
5776
5777 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5778 if (uError == VMX_IGS_ERROR)
5779 uError = VMX_IGS_REASON_NOT_FOUND;
5780 } while (0);
5781
5782 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5783 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5784 return uError;
5785
5786#undef HMVMX_ERROR_BREAK
5787#undef HMVMX_CHECK_BREAK
5788}
5789
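/*
 * Illustration only, not part of the build: two of the subtler guest-state checks
 * above, restated as standalone predicates.  The IA32_VMX_MISC bit layout assumed
 * here (bits 8:6 = supported activity states, bit 6 = HLT, bit 7 = shutdown,
 * bit 8 = wait-for-SIPI) follows the Intel SDM; function and parameter names are
 * made up for the sketch and this is not a drop-in replacement for the
 * HMVMX_CHECK_BREAK logic.
 */
#if 0
# include <stdbool.h>
# include <stdint.h>

/* True if the guest activity state is one the CPU reports as supported.
   Activity states: 0=active (always valid), 1=HLT, 2=shutdown, 3=wait-for-SIPI. */
static bool exampleIsActivityStateSupported(uint64_t uMiscMsr, uint32_t uActivityState)
{
    if (uActivityState == 0 /* active */)
        return true;
    uint32_t const fSupported = (uint32_t)((uMiscMsr >> 6) & 0x7); /* IA32_VMX_MISC bits 8:6. */
    return uActivityState <= 3
        && (fSupported & (UINT32_C(1) << (uActivityState - 1))) != 0;
}

/* Expected value of the pending-debug-exceptions BS bit (bit 14) when interrupts
   are blocked by STI/MOV-SS or the activity state is HLT: BS must be set exactly
   when RFLAGS.TF is 1 and IA32_DEBUGCTL.BTF is 0. */
static bool exampleIsPendingDebugBsConsistent(uint64_t fPendingDbgXcpts, uint64_t fRFlags, uint64_t uDebugCtlMsr)
{
    bool const fBsSet      = (fPendingDbgXcpts & (UINT64_C(1) << 14)) != 0;
    bool const fBsExpected = (fRFlags & UINT64_C(0x100) /* TF, bit 8 */) != 0
                          && (uDebugCtlMsr & (UINT64_C(1) << 1) /* BTF */) == 0;
    return fBsSet == fBsExpected;
}
#endif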
5790
5791#ifndef HMVMX_USE_FUNCTION_TABLE
5792/**
5793 * Handles a guest VM-exit from hardware-assisted VMX execution.
5794 *
5795 * @returns Strict VBox status code (i.e. informational status codes too).
5796 * @param pVCpu The cross context virtual CPU structure.
5797 * @param pVmxTransient The VMX-transient structure.
5798 */
5799DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5800{
5801#ifdef DEBUG_ramshankar
5802# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5803 do { \
5804 if (a_fSave != 0) \
5805 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5806 VBOXSTRICTRC rcStrict = a_CallExpr; \
5807 if (a_fSave != 0) \
5808 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5809 return rcStrict; \
5810 } while (0)
5811#else
5812# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5813#endif
5814 uint32_t const uExitReason = pVmxTransient->uExitReason;
5815 switch (uExitReason)
5816 {
5817 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5818 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5819 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5820 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5821 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5822 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5823 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5824 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5825 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5826 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5827 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5828 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5829 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5830 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5831 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5832 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5833 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5834 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5835 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5836 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5837 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5838 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5839 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5840 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5841 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5842 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5843 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5844 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5845 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5846 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5847#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5848 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5849 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5850 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5851 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5852 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5853 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5854 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5855 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5856 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5857 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5858#else
5859 case VMX_EXIT_VMCLEAR:
5860 case VMX_EXIT_VMLAUNCH:
5861 case VMX_EXIT_VMPTRLD:
5862 case VMX_EXIT_VMPTRST:
5863 case VMX_EXIT_VMREAD:
5864 case VMX_EXIT_VMRESUME:
5865 case VMX_EXIT_VMWRITE:
5866 case VMX_EXIT_VMXOFF:
5867 case VMX_EXIT_VMXON:
5868 case VMX_EXIT_INVVPID:
5869 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5870#endif
5871#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5872 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5873#else
5874 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5875#endif
5876
5877 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5878 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5879 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5880
5881 case VMX_EXIT_INIT_SIGNAL:
5882 case VMX_EXIT_SIPI:
5883 case VMX_EXIT_IO_SMI:
5884 case VMX_EXIT_SMI:
5885 case VMX_EXIT_ERR_MSR_LOAD:
5886 case VMX_EXIT_ERR_MACHINE_CHECK:
5887 case VMX_EXIT_PML_FULL:
5888 case VMX_EXIT_VIRTUALIZED_EOI:
5889 case VMX_EXIT_GDTR_IDTR_ACCESS:
5890 case VMX_EXIT_LDTR_TR_ACCESS:
5891 case VMX_EXIT_APIC_WRITE:
5892 case VMX_EXIT_RDRAND:
5893 case VMX_EXIT_RSM:
5894 case VMX_EXIT_VMFUNC:
5895 case VMX_EXIT_ENCLS:
5896 case VMX_EXIT_RDSEED:
5897 case VMX_EXIT_XSAVES:
5898 case VMX_EXIT_XRSTORS:
5899 case VMX_EXIT_UMWAIT:
5900 case VMX_EXIT_TPAUSE:
5901 case VMX_EXIT_LOADIWKEY:
5902 default:
5903 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5904 }
5905#undef VMEXIT_CALL_RET
5906}
5907#endif /* !HMVMX_USE_FUNCTION_TABLE */
5908
5909
5910#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5911/**
5912 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5913 *
5914 * @returns Strict VBox status code (i.e. informational status codes too).
5915 * @param pVCpu The cross context virtual CPU structure.
5916 * @param pVmxTransient The VMX-transient structure.
5917 */
5918DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5919{
5920 uint32_t const uExitReason = pVmxTransient->uExitReason;
5921 switch (uExitReason)
5922 {
5923# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5924 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient);
5925 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolationNested(pVCpu, pVmxTransient);
5926# else
5927 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
5928 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
5929# endif
5930 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5931 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5932 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5933
5934 /*
5935 * We shouldn't direct host physical interrupts to the nested-guest.
5936 */
5937 case VMX_EXIT_EXT_INT:
5938 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5939
5940 /*
5941 * Instructions that cause VM-exits unconditionally or whose exit condition is
5942 * controlled solely by the nested hypervisor (meaning that if the VM-exit
5943 * happens, it's guaranteed to be a nested-guest VM-exit).
5944 *
5945 * - Provides VM-exit instruction length ONLY.
5946 */
5947 case VMX_EXIT_CPUID: /* Unconditional. */
5948 case VMX_EXIT_VMCALL:
5949 case VMX_EXIT_GETSEC:
5950 case VMX_EXIT_INVD:
5951 case VMX_EXIT_XSETBV:
5952 case VMX_EXIT_VMLAUNCH:
5953 case VMX_EXIT_VMRESUME:
5954 case VMX_EXIT_VMXOFF:
5955 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5956 case VMX_EXIT_VMFUNC:
5957 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5958
5959 /*
5960 * Instructions that cause VM-exits unconditionally or whose exit condition is
5961 * controlled solely by the nested hypervisor (meaning that if the VM-exit
5962 * happens, it's guaranteed to be a nested-guest VM-exit).
5963 *
5964 * - Provides VM-exit instruction length.
5965 * - Provides VM-exit information.
5966 * - Optionally provides Exit qualification.
5967 *
5968 * Since Exit qualification is 0 for all VM-exits where it is not
5969 * applicable, reading and passing it to the guest should produce
5970 * defined behavior.
5971 *
5972 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5973 */
5974 case VMX_EXIT_INVEPT: /* Unconditional. */
5975 case VMX_EXIT_INVVPID:
5976 case VMX_EXIT_VMCLEAR:
5977 case VMX_EXIT_VMPTRLD:
5978 case VMX_EXIT_VMPTRST:
5979 case VMX_EXIT_VMXON:
5980 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5981 case VMX_EXIT_LDTR_TR_ACCESS:
5982 case VMX_EXIT_RDRAND:
5983 case VMX_EXIT_RDSEED:
5984 case VMX_EXIT_XSAVES:
5985 case VMX_EXIT_XRSTORS:
5986 case VMX_EXIT_UMWAIT:
5987 case VMX_EXIT_TPAUSE:
5988 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5989
5990 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5991 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5992 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5993 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5994 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
5995 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
5996 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
5997 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
5998 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
5999 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
6000 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
6001 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
6002 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
6003 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
6004 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
6005 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
6006 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
6007 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
6008 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
6009
6010 case VMX_EXIT_PREEMPT_TIMER:
6011 {
6012 /** @todo NSTVMX: Preempt timer. */
6013 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
6014 }
6015
6016 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
6017 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
6018
6019 case VMX_EXIT_VMREAD:
6020 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
6021
6022 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
6023 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
6024
6025 case VMX_EXIT_INIT_SIGNAL:
6026 case VMX_EXIT_SIPI:
6027 case VMX_EXIT_IO_SMI:
6028 case VMX_EXIT_SMI:
6029 case VMX_EXIT_ERR_MSR_LOAD:
6030 case VMX_EXIT_ERR_MACHINE_CHECK:
6031 case VMX_EXIT_PML_FULL:
6032 case VMX_EXIT_RSM:
6033 default:
6034 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
6035 }
6036}
6037#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6038
6039
6040/** @name VM-exit helpers.
6041 * @{
6042 */
6043/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6044/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6045/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6046
6047/** Macro for VM-exits called unexpectedly. */
6048#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
6049 do { \
6050 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
6051 return VERR_VMX_UNEXPECTED_EXIT; \
6052 } while (0)
6053
6054#ifdef VBOX_STRICT
6055# ifndef IN_NEM_DARWIN
6056/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
6057# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
6058 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6059
6060# define HMVMX_ASSERT_PREEMPT_CPUID() \
6061 do { \
6062 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6063 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6064 } while (0)
6065
6066# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6067 do { \
6068 AssertPtr((a_pVCpu)); \
6069 AssertPtr((a_pVmxTransient)); \
6070 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6071 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6072 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6073 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6074 Assert((a_pVmxTransient)->pVmcsInfo); \
6075 Assert(ASMIntAreEnabled()); \
6076 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6077 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
6078 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6079 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6080 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
6081 HMVMX_ASSERT_PREEMPT_CPUID(); \
6082 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6083 } while (0)
6084# else
6085# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
6086# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
6087# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6088 do { \
6089 AssertPtr((a_pVCpu)); \
6090 AssertPtr((a_pVmxTransient)); \
6091 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6092 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6093 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6094 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6095 Assert((a_pVmxTransient)->pVmcsInfo); \
6096 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6097 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6098 } while (0)
6099# endif
6100
6101# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6102 do { \
6103 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
6104 Assert((a_pVmxTransient)->fIsNestedGuest); \
6105 } while (0)
6106
6107# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6108 do { \
6109 Log4Func(("\n")); \
6110 } while (0)
6111#else
6112# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6113 do { \
6114 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6115 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
6116 } while (0)
6117
6118# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6119 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
6120
6121# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
6122#endif
6123
6124#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6125/** Macro that performs the necessary privilege checks and intercepted VM-exit handling for
6126 * guests that attempted to execute a VMX instruction. */
6127# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
6128 do \
6129 { \
6130 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
6131 if (rcStrictTmp == VINF_SUCCESS) \
6132 { /* likely */ } \
6133 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6134 { \
6135 Assert((a_pVCpu)->hm.s.Event.fPending); \
6136 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
6137 return VINF_SUCCESS; \
6138 } \
6139 else \
6140 { \
6141 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
6142 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
6143 } \
6144 } while (0)
6145
6146/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
6147# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
6148 do \
6149 { \
6150 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
6151 (a_pGCPtrEffAddr)); \
6152 if (rcStrictTmp == VINF_SUCCESS) \
6153 { /* likely */ } \
6154 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6155 { \
6156 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
6157 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
6158 NOREF(uXcptTmp); \
6159 return VINF_SUCCESS; \
6160 } \
6161 else \
6162 { \
6163 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
6164 return rcStrictTmp; \
6165 } \
6166 } while (0)
6167#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6168
6169
6170/**
6171 * Advances the guest RIP by the specified number of bytes.
6172 *
6173 * @param pVCpu The cross context virtual CPU structure.
6174 * @param cbInstr Number of bytes to advance the RIP by.
6175 *
6176 * @remarks No-long-jump zone!!!
6177 */
6178DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
6179{
6180 /* Advance the RIP. */
6181 pVCpu->cpum.GstCtx.rip += cbInstr;
6182 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
6183
6184 /* Update interrupt inhibition. */
6185 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
6186 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
6187 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6188}
6189
6190
6191/**
6192 * Advances the guest RIP after reading it from the VMCS.
6193 *
6194 * @returns VBox status code, no informational status codes.
6195 * @param pVCpu The cross context virtual CPU structure.
6196 * @param pVmxTransient The VMX-transient structure.
6197 *
6198 * @remarks No-long-jump zone!!!
6199 */
6200static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6201{
6202 vmxHCReadToTransientSlow<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
6203 /** @todo consider template here after checking callers. */
6204 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
6205 AssertRCReturn(rc, rc);
6206
6207 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
6208 return VINF_SUCCESS;
6209}
6210
6211
6212/**
6213 * Handles a condition that occurred while delivering an event through the guest or
6214 * nested-guest IDT.
6215 *
6216 * @returns Strict VBox status code (i.e. informational status codes too).
6217 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6218 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
6219 * to continue execution of the guest which will delivery the \#DF.
6220 * @retval VINF_EM_RESET if we detected a triple-fault condition.
6221 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
6222 *
6223 * @param pVCpu The cross context virtual CPU structure.
6224 * @param pVmxTransient The VMX-transient structure.
6225 *
6226 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6227 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
6228 * is due to an EPT violation, PML full or SPP-related event.
6229 *
6230 * @remarks No-long-jump zone!!!
6231 */
6232static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6233{
6234 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6235 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6236 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6237 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6238 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6239 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
6240
6241 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
6242 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6243 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
6244 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
6245 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
6246 {
6247 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
6248 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
6249
6250 /*
6251 * If the event was a software interrupt (generated with INT n) or a software exception
6252 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
6253 * can handle the VM-exit and continue guest execution which will re-execute the
6254 * instruction rather than re-injecting the exception, as that can cause premature
6255 * trips to ring-3 before injection and involve TRPM which currently has no way of
6256 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
6257 * the problem).
6258 */
6259 IEMXCPTRAISE enmRaise;
6260 IEMXCPTRAISEINFO fRaiseInfo;
6261 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6262 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6263 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6264 {
6265 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
6266 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6267 }
6268 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
6269 {
6270 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
6271 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
6272 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
6273
6274 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
6275 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
6276
6277 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
6278
6279 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
6280 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
6281 {
6282 pVmxTransient->fVectoringPF = true;
6283 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6284 }
6285 }
6286 else
6287 {
6288 /*
6289 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
6290 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
6291 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
6292 */
6293 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6294 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6295 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
6296 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6297 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6298 }
6299
6300 /*
6301 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
6302 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
6303 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
6304 * subsequent VM-entry would fail, see @bugref{7445}.
6305 *
6306 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
6307 */
6308 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6309 && enmRaise == IEMXCPTRAISE_PREV_EVENT
6310 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6311 && CPUMIsGuestNmiBlocking(pVCpu))
6312 {
6313 CPUMSetGuestNmiBlocking(pVCpu, false);
6314 }
6315
6316 switch (enmRaise)
6317 {
6318 case IEMXCPTRAISE_CURRENT_XCPT:
6319 {
6320 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
6321 Assert(rcStrict == VINF_SUCCESS);
6322 break;
6323 }
6324
6325 case IEMXCPTRAISE_PREV_EVENT:
6326 {
6327 uint32_t u32ErrCode;
6328 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
6329 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
6330 else
6331 u32ErrCode = 0;
6332
6333 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
6334 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
6335 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
6336 pVCpu->cpum.GstCtx.cr2);
6337
6338 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6339 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
6340 Assert(rcStrict == VINF_SUCCESS);
6341 break;
6342 }
6343
6344 case IEMXCPTRAISE_REEXEC_INSTR:
6345 Assert(rcStrict == VINF_SUCCESS);
6346 break;
6347
6348 case IEMXCPTRAISE_DOUBLE_FAULT:
6349 {
6350 /*
6351 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
6352 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
6353 */
6354 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6355 {
6356 pVmxTransient->fVectoringDoublePF = true;
6357 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6358 pVCpu->cpum.GstCtx.cr2));
6359 rcStrict = VINF_SUCCESS;
6360 }
6361 else
6362 {
6363 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
6364 vmxHCSetPendingXcptDF(pVCpu);
6365 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6366 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6367 rcStrict = VINF_HM_DOUBLE_FAULT;
6368 }
6369 break;
6370 }
6371
6372 case IEMXCPTRAISE_TRIPLE_FAULT:
6373 {
6374 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
6375 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6376 rcStrict = VINF_EM_RESET;
6377 break;
6378 }
6379
6380 case IEMXCPTRAISE_CPU_HANG:
6381 {
6382 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
6383 rcStrict = VERR_EM_GUEST_CPU_HANG;
6384 break;
6385 }
6386
6387 default:
6388 {
6389 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
6390 rcStrict = VERR_VMX_IPE_2;
6391 break;
6392 }
6393 }
6394 }
6395 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6396 && !CPUMIsGuestNmiBlocking(pVCpu))
6397 {
6398 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
6399 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
6400 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
6401 {
6402 /*
6403 * Execution of IRET caused a fault when NMI blocking was in effect (i.e we're in
6404 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6405 * that virtual NMIs remain blocked until the IRET execution is completed.
6406 *
6407 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
6408 */
6409 CPUMSetGuestNmiBlocking(pVCpu, true);
6410 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6411 }
6412 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6413 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6414 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6415 {
6416 /*
6417 * Execution of IRET caused an EPT violation, page-modification log-full event or
6418 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
6419 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6420 * that virtual NMIs remain blocked until the IRET execution is completed.
6421 *
6422 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
6423 */
6424 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
6425 {
6426 CPUMSetGuestNmiBlocking(pVCpu, true);
6427 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6428 }
6429 }
6430 }
6431
6432 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
6433 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
6434 return rcStrict;
6435}
6436
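/*
 * Illustration only, not part of the build: a much simplified model of the
 * benign/contributory/#PF classification that IEMEvaluateRecursiveXcpt performs for
 * the code above.  Only the classic Intel SDM double-fault table (Table 6-5) is
 * modelled; NMIs, ICEBP and the VM-exit specific refinements are deliberately
 * ignored, and all names here are made up for the sketch.
 */
#if 0
# include <stdbool.h>
# include <stdint.h>

typedef enum EXAMPLEXCPTRAISE
{
    EXAMPLEXCPTRAISE_CURRENT_XCPT,  /* Handle the two events serially (deliver the second one). */
    EXAMPLEXCPTRAISE_DOUBLE_FAULT,  /* Convert the pair into a #DF. */
    EXAMPLEXCPTRAISE_TRIPLE_FAULT   /* Fault while delivering #DF -> shutdown / VM reset. */
} EXAMPLEXCPTRAISE;

/* Contributory exceptions: #DE (0), #TS (10), #NP (11), #SS (12), #GP (13). */
static bool exampleIsContributory(uint8_t uVector)
{
    return uVector == 0 || (uVector >= 10 && uVector <= 13);
}

static EXAMPLEXCPTRAISE exampleEvaluateRecursiveXcpt(uint8_t uFirstVector, uint8_t uSecondVector)
{
    if (   uFirstVector == 8 /* #DF */
        && (exampleIsContributory(uSecondVector) || uSecondVector == 14 /* #PF */))
        return EXAMPLEXCPTRAISE_TRIPLE_FAULT;
    if (exampleIsContributory(uFirstVector) && exampleIsContributory(uSecondVector))
        return EXAMPLEXCPTRAISE_DOUBLE_FAULT;
    if (   uFirstVector == 14 /* #PF */
        && (exampleIsContributory(uSecondVector) || uSecondVector == 14))
        return EXAMPLEXCPTRAISE_DOUBLE_FAULT;
    return EXAMPLEXCPTRAISE_CURRENT_XCPT;   /* Benign combination. */
}
#endif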
6437
6438#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6439/**
6440 * Performs the relevant VMX instruction checks for VM-exits that occurred due to the
6441 * guest attempting to execute a VMX instruction.
6442 *
6443 * @returns Strict VBox status code (i.e. informational status codes too).
6444 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6445 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6446 *
6447 * @param pVCpu The cross context virtual CPU structure.
6448 * @param uExitReason The VM-exit reason.
6449 *
6450 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
6451 * @remarks No-long-jump zone!!!
6452 */
6453static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
6454{
6455 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6456 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6457
6458 /*
6459 * The physical CPU would have already checked the CPU mode/code segment.
6460 * We shall just assert here for paranoia.
6461 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
6462 */
6463 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6464 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6465 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
6466
6467 if (uExitReason == VMX_EXIT_VMXON)
6468 {
6469 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6470
6471 /*
6472 * We check CR4.VMXE because it is required to be always set while in VMX operation
6473 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
6474 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6475 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6476 */
6477 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6478 {
6479 Log4Func(("CR4.VMXE is not set -> #UD\n"));
6480 vmxHCSetPendingXcptUD(pVCpu);
6481 return VINF_HM_PENDING_XCPT;
6482 }
6483 }
6484 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
6485 {
6486 /*
6487 * The guest has not entered VMX operation but attempted to execute a VMX instruction
6488 * (other than VMXON), we need to raise a #UD.
6489 */
6490 Log4Func(("Not in VMX root mode -> #UD\n"));
6491 vmxHCSetPendingXcptUD(pVCpu);
6492 return VINF_HM_PENDING_XCPT;
6493 }
6494
6495 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
6496 return VINF_SUCCESS;
6497}
6498
6499
6500/**
6501 * Decodes the memory operand of an instruction that caused a VM-exit.
6502 *
6503 * The Exit qualification field provides the displacement field for memory
6504 * operand instructions, if any.
6505 *
6506 * @returns Strict VBox status code (i.e. informational status codes too).
6507 * @retval VINF_SUCCESS if the operand was successfully decoded.
6508 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6509 * operand.
6510 * @param pVCpu The cross context virtual CPU structure.
6511 * @param uExitInstrInfo The VM-exit instruction information field.
6512 * @param enmMemAccess The memory operand's access type (read or write).
6513 * @param GCPtrDisp The instruction displacement field, if any. For
6514 * RIP-relative addressing pass RIP + displacement here.
6515 * @param pGCPtrMem Where to store the effective destination memory address.
6516 *
6517 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6518 * virtual-8086 mode hence skips those checks while verifying if the
6519 * segment is valid.
6520 */
6521static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6522 PRTGCPTR pGCPtrMem)
6523{
6524 Assert(pGCPtrMem);
6525 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6526 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6527 | CPUMCTX_EXTRN_CR0);
6528
6529 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6530 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6531 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6532
6533 VMXEXITINSTRINFO ExitInstrInfo;
6534 ExitInstrInfo.u = uExitInstrInfo;
6535 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6536 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6537 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6538 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6539 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6540 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6541 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6542 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6543 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6544
6545 /*
6546 * Validate instruction information.
6547 * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6548 */
6549 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6550 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6551 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6552 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6553 AssertLogRelMsgReturn(fIsMemOperand,
6554 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6555
6556 /*
6557 * Compute the complete effective address.
6558 *
6559 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6560 * See AMD spec. 4.5.2 "Segment Registers".
6561 */
6562 RTGCPTR GCPtrMem = GCPtrDisp;
6563 if (fBaseRegValid)
6564 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6565 if (fIdxRegValid)
6566 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6567
6568 RTGCPTR const GCPtrOff = GCPtrMem;
6569 if ( !fIsLongMode
6570 || iSegReg >= X86_SREG_FS)
6571 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6572 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6573
6574 /*
6575 * Validate effective address.
6576 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6577 */
6578 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6579 Assert(cbAccess > 0);
6580 if (fIsLongMode)
6581 {
6582 if (X86_IS_CANONICAL(GCPtrMem))
6583 {
6584 *pGCPtrMem = GCPtrMem;
6585 return VINF_SUCCESS;
6586 }
6587
6588 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6589 * "Data Limit Checks in 64-bit Mode". */
6590 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6591 vmxHCSetPendingXcptGP(pVCpu, 0);
6592 return VINF_HM_PENDING_XCPT;
6593 }
6594
6595 /*
6596 * This is a watered down version of iemMemApplySegment().
6597 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6598 * and segment CPL/DPL checks are skipped.
6599 */
6600 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6601 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6602 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6603
6604 /* Check if the segment is present and usable. */
6605 if ( pSel->Attr.n.u1Present
6606 && !pSel->Attr.n.u1Unusable)
6607 {
6608 Assert(pSel->Attr.n.u1DescType);
6609 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6610 {
6611 /* Check permissions for the data segment. */
6612 if ( enmMemAccess == VMXMEMACCESS_WRITE
6613 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6614 {
6615 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6616 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6617 return VINF_HM_PENDING_XCPT;
6618 }
6619
6620 /* Check limits if it's a normal data segment. */
6621 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6622 {
6623 if ( GCPtrFirst32 > pSel->u32Limit
6624 || GCPtrLast32 > pSel->u32Limit)
6625 {
6626 Log4Func(("Data segment limit exceeded. "
6627 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6628 GCPtrLast32, pSel->u32Limit));
6629 if (iSegReg == X86_SREG_SS)
6630 vmxHCSetPendingXcptSS(pVCpu, 0);
6631 else
6632 vmxHCSetPendingXcptGP(pVCpu, 0);
6633 return VINF_HM_PENDING_XCPT;
6634 }
6635 }
6636 else
6637 {
6638 /* Check limits if it's an expand-down data segment.
6639 Note! The upper boundary is defined by the B bit, not the G bit! */
6640 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6641 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6642 {
6643 Log4Func(("Expand-down data segment limit exceeded. "
6644 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6645 GCPtrLast32, pSel->u32Limit));
6646 if (iSegReg == X86_SREG_SS)
6647 vmxHCSetPendingXcptSS(pVCpu, 0);
6648 else
6649 vmxHCSetPendingXcptGP(pVCpu, 0);
6650 return VINF_HM_PENDING_XCPT;
6651 }
6652 }
6653 }
6654 else
6655 {
6656 /* Check permissions for the code segment. */
6657 if ( enmMemAccess == VMXMEMACCESS_WRITE
6658 || ( enmMemAccess == VMXMEMACCESS_READ
6659 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6660 {
6661 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6662 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6663 vmxHCSetPendingXcptGP(pVCpu, 0);
6664 return VINF_HM_PENDING_XCPT;
6665 }
6666
6667 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6668 if ( GCPtrFirst32 > pSel->u32Limit
6669 || GCPtrLast32 > pSel->u32Limit)
6670 {
6671 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6672 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6673 if (iSegReg == X86_SREG_SS)
6674 vmxHCSetPendingXcptSS(pVCpu, 0);
6675 else
6676 vmxHCSetPendingXcptGP(pVCpu, 0);
6677 return VINF_HM_PENDING_XCPT;
6678 }
6679 }
6680 }
6681 else
6682 {
6683 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6684 vmxHCSetPendingXcptGP(pVCpu, 0);
6685 return VINF_HM_PENDING_XCPT;
6686 }
6687
6688 *pGCPtrMem = GCPtrMem;
6689 return VINF_SUCCESS;
6690}
6691#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
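/*
 * Illustration only, not part of the build: the core of the effective-address
 * computation done by vmxHCDecodeMemOperand above, boiled down to plain arithmetic,
 * plus the expand-down data-segment limit rule the "Note!" above refers to.  All
 * names are made up for the sketch; the address-size encoding assumed here
 * (0=16-bit, 1=32-bit, 2=64-bit) is the one used by the VM-exit instruction
 * information field.
 */
#if 0
# include <stdbool.h>
# include <stdint.h>

static uint64_t exampleComputeEffAddr(uint64_t uSegBase, uint64_t uBase, uint64_t uIndex, unsigned uScale,
                                      int64_t offDisp, unsigned uAddrSize, bool fLongMode, bool fFsGs)
{
    static uint64_t const s_auMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
    uint64_t uEffAddr = (uint64_t)offDisp + uBase + (uIndex << uScale);
    if (!fLongMode || fFsGs)        /* Segment bases other than FS/GS are ignored in 64-bit mode. */
        uEffAddr += uSegBase;
    return uEffAddr & s_auMasks[uAddrSize];
}

/* Expand-down data segment: valid offsets are (limit, upper], where the upper bound
   is 4G-1 when the B bit is set and 64K-1 otherwise (the G bit plays no role here). */
static bool exampleIsWithinExpandDownLimit(uint32_t offFirst, uint32_t offLast, uint32_t uLimit, bool fBigSeg)
{
    uint32_t const uUpper = fBigSeg ? UINT32_MAX : UINT32_C(0xffff);
    return offFirst > uLimit && offLast <= uUpper && offFirst <= offLast;
}
#endif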
6692
6693
6694/**
6695 * VM-exit helper for LMSW.
6696 */
6697static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6698{
6699 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6700 AssertRCReturn(rc, rc);
6701
6702 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6703 AssertMsg( rcStrict == VINF_SUCCESS
6704 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6705
6706 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6707 if (rcStrict == VINF_IEM_RAISED_XCPT)
6708 {
6709 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6710 rcStrict = VINF_SUCCESS;
6711 }
6712
6713 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6714 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6715 return rcStrict;
6716}
6717
6718
6719/**
6720 * VM-exit helper for CLTS.
6721 */
6722static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6723{
6724 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6725 AssertRCReturn(rc, rc);
6726
6727 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6728 AssertMsg( rcStrict == VINF_SUCCESS
6729 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6730
6731 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6732 if (rcStrict == VINF_IEM_RAISED_XCPT)
6733 {
6734 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6735 rcStrict = VINF_SUCCESS;
6736 }
6737
6738 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6739 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6740 return rcStrict;
6741}
6742
6743
6744/**
6745 * VM-exit helper for MOV from CRx (CRx read).
6746 */
6747static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6748{
6749 Assert(iCrReg < 16);
6750 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6751
6752 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6753 AssertRCReturn(rc, rc);
6754
6755 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6756 AssertMsg( rcStrict == VINF_SUCCESS
6757 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6758
6759 if (iGReg == X86_GREG_xSP)
6760 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6761 else
6762 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6763#ifdef VBOX_WITH_STATISTICS
6764 switch (iCrReg)
6765 {
6766 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6767 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6768 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6769 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6770 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6771 }
6772#endif
6773 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6774 return rcStrict;
6775}
6776
6777
6778/**
6779 * VM-exit helper for MOV to CRx (CRx write).
6780 */
6781static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6782{
6783 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6784
6785 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6786 AssertMsg( rcStrict == VINF_SUCCESS
6787 || rcStrict == VINF_IEM_RAISED_XCPT
6788 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6789
6790 switch (iCrReg)
6791 {
6792 case 0:
6793 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6794 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6795 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6796 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6797 break;
6798
6799 case 2:
6800 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6801 /* Nothing to do here, CR2 it's not part of the VMCS. */
6802 break;
6803
6804 case 3:
6805 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6806 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6807 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6808 break;
6809
6810 case 4:
6811 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6812 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6813#ifndef IN_NEM_DARWIN
6814 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6815 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6816#else
6817 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6818#endif
6819 break;
6820
6821 case 8:
6822 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6823 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6824 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6825 break;
6826
6827 default:
6828 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6829 break;
6830 }
6831
6832 if (rcStrict == VINF_IEM_RAISED_XCPT)
6833 {
6834 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6835 rcStrict = VINF_SUCCESS;
6836 }
6837 return rcStrict;
6838}
6839
6840
6841/**
6842 * VM-exit exception handler for \#PF (Page-fault exception).
6843 *
6844 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6845 */
6846static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6847{
6848 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6849 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
6850
6851#ifndef IN_NEM_DARWIN
6852 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6853 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6854 { /* likely */ }
6855 else
6856#endif
6857 {
6858#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6859 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6860#endif
6861 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6862 if (!pVmxTransient->fVectoringDoublePF)
6863 {
6864 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6865 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6866 }
6867 else
6868 {
6869 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6870 Assert(!pVmxTransient->fIsNestedGuest);
6871 vmxHCSetPendingXcptDF(pVCpu);
6872 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6873 }
6874 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6875 return VINF_SUCCESS;
6876 }
6877
6878 Assert(!pVmxTransient->fIsNestedGuest);
6879
6880 /* If it's a vectoring #PF, emulate injecting the original event, as PGMTrap0eHandler() is incapable
6881 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6882 if (pVmxTransient->fVectoringPF)
6883 {
6884 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6885 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6886 }
6887
6888 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6889 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6890 AssertRCReturn(rc, rc);
6891
6892 Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pCtx->cs.Sel, pCtx->rip,
6893 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pCtx->cr3));
6894
6895 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6896 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
6897
6898 Log4Func(("#PF: rc=%Rrc\n", rc));
6899 if (rc == VINF_SUCCESS)
6900 {
6901 /*
6902 * This is typically a shadow page table sync or a MMIO instruction. But we may have
6903 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6904 */
6905 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6906 TRPMResetTrap(pVCpu);
6907 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6908 return rc;
6909 }
6910
6911 if (rc == VINF_EM_RAW_GUEST_TRAP)
6912 {
6913 if (!pVmxTransient->fVectoringDoublePF)
6914 {
6915 /* It's a guest page fault and needs to be reflected to the guest. */
6916 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6917 TRPMResetTrap(pVCpu);
6918 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6919 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6920 uGstErrorCode, pVmxTransient->uExitQual);
6921 }
6922 else
6923 {
6924 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6925 TRPMResetTrap(pVCpu);
6926 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6927 vmxHCSetPendingXcptDF(pVCpu);
6928 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6929 }
6930
6931 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6932 return VINF_SUCCESS;
6933 }
6934
6935 TRPMResetTrap(pVCpu);
6936 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6937 return rc;
6938}
6939
6940
6941/**
6942 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6943 *
6944 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6945 */
6946static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6947{
6948 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6949 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6950
6951 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR0>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6952 AssertRCReturn(rc, rc);
6953
6954 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6955 {
6956 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
6957 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6958
6959 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6960 * provides VM-exit instruction length. If this causes problem later,
6961 * disassemble the instruction like it's done on AMD-V. */
6962 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6963 AssertRCReturn(rc2, rc2);
6964 return rc;
6965 }
6966
6967 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6968 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6969 return VINF_SUCCESS;
6970}
6971
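/*
 * Illustration only, not part of the build: the legacy routing decision made by the
 * #MF handler above.  With CR0.NE clear, x87 errors are reported the PC-compatible
 * way: the FERR# line is asserted and the chipset routes it to IRQ 13, which is why
 * the handler raises ISA IRQ 13 instead of reflecting exception 16 to the guest.
 * The function name is made up for the sketch.
 */
#if 0
# include <stdbool.h>
# include <stdint.h>

/* Returns true when the x87 error should be delivered as IRQ 13, false when it
   should be reflected as a #MF (vector 16) exception to the guest. */
static bool exampleUseLegacyFerrRouting(uint64_t uCr0)
{
    return (uCr0 & UINT64_C(0x20) /* CR0.NE, bit 5 */) == 0;
}
#endif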
6972
6973/**
6974 * VM-exit exception handler for \#BP (Breakpoint exception).
6975 *
6976 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6977 */
6978static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6979{
6980 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6981 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6982
6983 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6984 AssertRCReturn(rc, rc);
6985
6986 VBOXSTRICTRC rcStrict;
6987 if (!pVmxTransient->fIsNestedGuest)
6988 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx));
6989 else
6990 rcStrict = VINF_EM_RAW_GUEST_TRAP;
6991
6992 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6993 {
6994 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6995 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6996 rcStrict = VINF_SUCCESS;
6997 }
6998
6999 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
7000 return rcStrict;
7001}
7002
7003
7004/**
7005 * VM-exit exception handler for \#AC (Alignment-check exception).
7006 *
7007 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7008 */
7009static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7010{
7011 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7012
7013 /*
7014 * Detect #ACs caused by the host having enabled split-lock detection.
7015 * Emulate such instructions.
7016 */
7017#define VMX_HC_EXIT_XCPT_AC_INITIAL_REGS (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS)
7018 int rc = vmxHCImportGuestState<VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7019 AssertRCReturn(rc, rc);
7020 /** @todo detect split lock in cpu feature? */
7021 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
7022 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7023 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
7024 || CPUMGetGuestCPL(pVCpu) != 3
7025 /* 3. When the EFLAGS.AC != 0 this can only be a split-lock case. */
7026 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
7027 {
7028 /*
7029 * Check for debug/trace events and import state accordingly.
7030 */
7031 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
7032 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7033 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
7034#ifndef IN_NEM_DARWIN
7035 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
7036#endif
7037 )
7038 {
7039 if (pVM->cCpus == 1)
7040 {
7041#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7042 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK,
7043 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7044#else
7045 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7046 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7047#endif
7048 AssertRCReturn(rc, rc);
7049 }
7050 }
7051 else
7052 {
7053 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7054 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7055 AssertRCReturn(rc, rc);
7056
7057 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
7058
7059 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
7060 {
7061 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
7062 if (rcStrict != VINF_SUCCESS)
7063 return rcStrict;
7064 }
7065 }
7066
7067 /*
7068 * Emulate the instruction.
7069 *
7070 * We have to ignore the LOCK prefix here as we must not retrigger the
7071 * detection on the host. This isn't all that satisfactory, though...
7072 */
7073 if (pVM->cCpus == 1)
7074 {
7075 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
7076 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7077
7078 /** @todo For SMP configs we should do a rendezvous here. */
7079 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
7080 if (rcStrict == VINF_SUCCESS)
7081#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7082 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
7083 HM_CHANGED_GUEST_RIP
7084 | HM_CHANGED_GUEST_RFLAGS
7085 | HM_CHANGED_GUEST_GPRS_MASK
7086 | HM_CHANGED_GUEST_CS
7087 | HM_CHANGED_GUEST_SS);
7088#else
7089 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7090#endif
7091 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7092 {
7093 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7094 rcStrict = VINF_SUCCESS;
7095 }
7096 return rcStrict;
7097 }
7098 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
7099 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7100 return VINF_EM_EMULATE_SPLIT_LOCK;
7101 }
7102
7103 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
7104 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7105 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
7106
7107 /* Re-inject it. We'll detect any nesting before getting here. */
7108 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7109 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7110 return VINF_SUCCESS;
7111}
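
/*
 * Illustrative sketch (not part of the build): the split-lock vs. architectural #AC
 * classification used at the top of vmxHCExitXcptAC() above, boiled down to a pure
 * predicate. A guest-architectural #AC requires CR0.AM=1, CPL=3 and EFLAGS.AC=1; if
 * any of these does not hold, the #AC can only stem from host split-lock detection.
 * The constants and the function name below are local to this sketch (both CR0.AM
 * and EFLAGS.AC are architecturally bit 18) and are not part of the VirtualBox API.
 */
#if 0 /* illustrative only */
# include <stdint.h>
# include <stdbool.h>

# define SKETCH_X86_CR0_AM UINT64_C(0x00040000)  /* CR0 bit 18: alignment mask. */
# define SKETCH_X86_EFL_AC UINT64_C(0x00040000)  /* EFLAGS bit 18: alignment check. */

/** Returns true when an #AC taken with the given state can only be a host split-lock #AC. */
static bool sketchIsHostSplitLockAc(uint64_t uCr0, uint64_t uEfl, unsigned uCpl)
{
    return !(uCr0 & SKETCH_X86_CR0_AM)  /* 486-style alignment checking not enabled. */
        || uCpl != 3                    /* Architectural #AC is only raised at CPL 3. */
        || !(uEfl & SKETCH_X86_EFL_AC); /* EFLAGS.AC clear rules out a legacy #AC. */
}
#endif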
7112
7113
7114/**
7115 * VM-exit exception handler for \#DB (Debug exception).
7116 *
7117 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7118 */
7119static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7120{
7121 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7122 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
7123
7124 /*
7125 * Get the DR6-like values from the Exit qualification and pass it to DBGF for processing.
7126 */
7127 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
7128
7129 /* See Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
7130 uint64_t const uDR6 = X86_DR6_INIT_VAL
7131 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
7132 | X86_DR6_BD | X86_DR6_BS));
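    /* Worked example: assuming X86_DR6_INIT_VAL is the architectural reset value 0xFFFF0FF0,
       an Exit Qualification of 0x4001 (B0 and BS set) yields uDR6 = 0xFFFF0FF0 | 0x1 | 0x4000
       = 0xFFFF4FF1, i.e. "breakpoint 0 hit while single-stepping". */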
7133
7134 int rc;
7135 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7136 if (!pVmxTransient->fIsNestedGuest)
7137 {
7138 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7139
7140 /*
7141 * Prevents stepping twice over the same instruction when the guest is stepping using
7142 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
7143 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
7144 */
7145 if ( rc == VINF_EM_DBG_STEPPED
7146 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
7147 {
7148 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7149 rc = VINF_EM_RAW_GUEST_TRAP;
7150 }
7151 }
7152 else
7153 rc = VINF_EM_RAW_GUEST_TRAP;
7154 Log6Func(("rc=%Rrc\n", rc));
7155 if (rc == VINF_EM_RAW_GUEST_TRAP)
7156 {
7157 /*
7158 * The exception was for the guest. Update DR6, DR7.GD and
7159 * IA32_DEBUGCTL.LBR before forwarding it.
7160 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
7161 */
7162#ifndef IN_NEM_DARWIN
7163 VMMRZCallRing3Disable(pVCpu);
7164 HM_DISABLE_PREEMPT(pVCpu);
7165
7166 pCtx->dr[6] &= ~X86_DR6_B_MASK;
7167 pCtx->dr[6] |= uDR6;
7168 if (CPUMIsGuestDebugStateActive(pVCpu))
7169 ASMSetDR6(pCtx->dr[6]);
7170
7171 HM_RESTORE_PREEMPT();
7172 VMMRZCallRing3Enable(pVCpu);
7173#else
7174 /** @todo */
7175#endif
7176
7177 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7178 AssertRCReturn(rc, rc);
7179
7180 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
7181 pCtx->dr[7] &= ~(uint64_t)X86_DR7_GD;
7182
7183 /* Paranoia. */
7184 pCtx->dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
7185 pCtx->dr[7] |= X86_DR7_RA1_MASK;
7186
7187 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
7188 AssertRC(rc);
7189
7190 /*
7191 * Raise #DB in the guest.
7192 *
7193 * It is important to reflect exactly what the VM-exit gave us (preserving the
7194 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
7195 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
7196 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
7197 *
7198 * Intel re-documented ICEBP/INT1 in May 2018 (it was previously only documented as
7199 * part of the Intel 386); see Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7200 */
7201 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7202 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7203 return VINF_SUCCESS;
7204 }
7205
7206 /*
7207 * Not a guest trap, must be a hypervisor related debug event then.
7208 * Update DR6 in case someone is interested in it.
7209 */
7210 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
7211 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
7212 CPUMSetHyperDR6(pVCpu, uDR6);
7213
7214 return rc;
7215}
7216
7217
7218/**
7219 * Hacks its way around the lovely mesa driver's backdoor accesses.
7220 *
7221 * @sa hmR0SvmHandleMesaDrvGp.
7222 */
7223static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7224{
7225 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
7226 RT_NOREF(pCtx);
7227
7228 /* For now we'll just skip the instruction. */
7229 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7230}
7231
7232
7233/**
7234 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
7235 * backdoor logging w/o checking what it is running inside.
7236 *
7237 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
7238 * backdoor port and magic numbers loaded in registers.
7239 *
7240 * @returns true if it is, false if it isn't.
7241 * @sa hmR0SvmIsMesaDrvGp.
7242 */
7243DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7244{
7245 /* 0xed: IN eAX,dx */
7246 uint8_t abInstr[1];
7247 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
7248 return false;
7249
7250 /* Check that it is #GP(0). */
7251 if (pVmxTransient->uExitIntErrorCode != 0)
7252 return false;
7253
7254 /* Check magic and port. */
7255 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
7256 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
7257 if (pCtx->rax != UINT32_C(0x564d5868))
7258 return false;
7259 if (pCtx->dx != UINT32_C(0x5658))
7260 return false;
7261
7262 /* Flat ring-3 CS. */
7263 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
7264 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
7265 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
7266 if (pCtx->cs.Attr.n.u2Dpl != 3)
7267 return false;
7268 if (pCtx->cs.u64Base != 0)
7269 return false;
7270
7271 /* Check opcode. */
7272 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
7273 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
7274 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
7275 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
7276 if (RT_FAILURE(rc))
7277 return false;
7278 if (abInstr[0] != 0xed)
7279 return false;
7280
7281 return true;
7282}
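
/*
 * For reference: the guest-side sequence the heuristic above matches is (roughly) the
 * classic VMware backdoor probe issued from flat ring-3 code:
 *
 *     mov eax, 0x564d5868     ; 'VMXh' backdoor magic
 *     mov ecx, <command>      ; backdoor command number (not checked above)
 *     mov dx,  0x5658         ; 'VX' backdoor I/O port
 *     in  eax, dx             ; opcode 0xED - faults with #GP(0) when port access is denied
 *
 * Only the #GP(0) error code, the magic, the port, the one-byte opcode, a ring-3 CS (DPL 3)
 * and a flat (zero-base) CS are verified.
 */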
7283
7284
7285/**
7286 * VM-exit exception handler for \#GP (General-protection exception).
7287 *
7288 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7289 */
7290static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7291{
7292 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7293 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
7294
7295 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7296 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7297#ifndef IN_NEM_DARWIN
7298 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
7299 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
7300 { /* likely */ }
7301 else
7302#endif
7303 {
7304#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7305# ifndef IN_NEM_DARWIN
7306 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7307# else
7308 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7309# endif
7310#endif
7311 /*
7312 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
7313 * executing a nested-guest, reflect #GP to the guest or nested-guest.
7314 */
7315 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7316 AssertRCReturn(rc, rc);
7317 Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
7318 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
7319
7320 if ( pVmxTransient->fIsNestedGuest
7321 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
7322 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
7323 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7324 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7325 else
7326 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
7327 return rc;
7328 }
7329
7330#ifndef IN_NEM_DARWIN
7331 Assert(CPUMIsGuestInRealModeEx(pCtx));
7332 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
7333 Assert(!pVmxTransient->fIsNestedGuest);
7334
7335 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7336 AssertRCReturn(rc, rc);
7337
7338 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7339 if (rcStrict == VINF_SUCCESS)
7340 {
7341 if (!CPUMIsGuestInRealModeEx(pCtx))
7342 {
7343 /*
7344 * The guest is no longer in real-mode, check if we can continue executing the
7345 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
7346 */
7347 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
7348 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
7349 {
7350 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
7351 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7352 }
7353 else
7354 {
7355 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
7356 rcStrict = VINF_EM_RESCHEDULE;
7357 }
7358 }
7359 else
7360 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7361 }
7362 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7363 {
7364 rcStrict = VINF_SUCCESS;
7365 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7366 }
7367 return VBOXSTRICTRC_VAL(rcStrict);
7368#endif
7369}
7370
7371
7372/**
7373 * VM-exit exception handler for \#DE (Divide Error).
7374 *
7375 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7376 */
7377static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7378{
7379 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7380 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
7381
7382 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7383 AssertRCReturn(rc, rc);
7384
7385 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7386 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
7387 {
7388 uint8_t cbInstr = 0;
7389 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
7390 if (rc2 == VINF_SUCCESS)
7391 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
7392 else if (rc2 == VERR_NOT_FOUND)
7393 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
7394 else
7395 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
7396 }
7397 else
7398 rcStrict = VINF_SUCCESS; /* Do nothing. */
7399
7400 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
7401 if (RT_FAILURE(rcStrict))
7402 {
7403 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7404 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7405 rcStrict = VINF_SUCCESS;
7406 }
7407
7408 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
7409 return VBOXSTRICTRC_VAL(rcStrict);
7410}
7411
7412
7413/**
7414 * VM-exit exception handler wrapper for all other exceptions that are not handled
7415 * by a specific handler.
7416 *
7417 * This simply re-injects the exception back into the VM without any special
7418 * processing.
7419 *
7420 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7421 */
7422static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7423{
7424 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7425
7426#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7427# ifndef IN_NEM_DARWIN
7428 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7429 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
7430 ("uVector=%#x u32XcptBitmap=%#X32\n",
7431 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
7432 NOREF(pVmcsInfo);
7433# endif
7434#endif
7435
7436 /*
7437 * Re-inject the exception into the guest. This cannot be a double-fault condition which
7438 * would have been handled while checking exits due to event delivery.
7439 */
7440 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7441
7442#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7443 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7444 AssertRCReturn(rc, rc);
7445 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7446#endif
7447
7448#ifdef VBOX_WITH_STATISTICS
7449 switch (uVector)
7450 {
7451 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
7452 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
7453 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
7454 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7455 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
7456 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
7457 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7458 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
7459 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
7460 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
7461 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
7462 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
7463 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
7464 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
7465 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
7466 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
7467 default:
7468 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
7469 break;
7470 }
7471#endif
7472
7473 /* We should never call this function for a page-fault, we'd need to pass on the fault address below otherwise. */
7474 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
7475 NOREF(uVector);
7476
7477 /* Re-inject the original exception into the guest. */
7478 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7479 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7480 return VINF_SUCCESS;
7481}
7482
7483
7484/**
7485 * VM-exit exception handler for all exceptions (except NMIs!).
7486 *
7487 * @remarks This may be called for both guests and nested-guests. Take care to not
7488 * make assumptions and avoid doing anything that is not relevant when
7489 * executing a nested-guest (e.g., Mesa driver hacks).
7490 */
7491static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7492{
7493 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
7494
7495 /*
7496 * If this VM-exit occurred while delivering an event through the guest IDT, take
7497 * action based on the return code and additional hints (e.g. for page-faults)
7498 * that will be updated in the VMX transient structure.
7499 */
7500 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
7501 if (rcStrict == VINF_SUCCESS)
7502 {
7503 /*
7504 * If an exception caused a VM-exit due to delivery of an event, the original
7505 * event may have to be re-injected into the guest. We shall reinject it and
7506 * continue guest execution. However, page-fault is a complicated case and
7507 * needs additional processing done in vmxHCExitXcptPF().
7508 */
7509 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7510 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7511 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
7512 || uVector == X86_XCPT_PF)
7513 {
7514 switch (uVector)
7515 {
7516 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7517 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7518 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7519 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7520 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7521 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7522 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7523 default:
7524 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7525 }
7526 }
7527 /* else: inject pending event before resuming guest execution. */
7528 }
7529 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7530 {
7531 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7532 rcStrict = VINF_SUCCESS;
7533 }
7534
7535 return rcStrict;
7536}
7537/** @} */
7538
7539
7540/** @name VM-exit handlers.
7541 * @{
7542 */
7543/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7544/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7545/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7546
7547/**
7548 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7549 */
7550HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7551{
7552 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7553 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7554
7555#ifndef IN_NEM_DARWIN
7556 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7557 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7558 return VINF_SUCCESS;
7559 return VINF_EM_RAW_INTERRUPT;
7560#else
7561 return VINF_SUCCESS;
7562#endif
7563}
7564
7565
7566/**
7567 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7568 * VM-exit.
7569 */
7570HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7571{
7572 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7573 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7574
7575 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
7576
7577 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7578 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7579 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7580
7581 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7582 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7583 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7584 NOREF(pVmcsInfo);
7585
7586 VBOXSTRICTRC rcStrict;
7587 switch (uExitIntType)
7588 {
7589#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7590 /*
7591 * Host physical NMIs:
7592 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7593 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7594 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7595 *
7596 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7597 * See Intel spec. 27.5.5 "Updating Non-Register State".
7598 */
7599 case VMX_EXIT_INT_INFO_TYPE_NMI:
7600 {
7601 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7602 break;
7603 }
7604#endif
7605
7606 /*
7607 * Privileged software exceptions (#DB from ICEBP),
7608 * Software exceptions (#BP and #OF),
7609 * Hardware exceptions:
7610 * Process the required exceptions and resume guest execution if possible.
7611 */
7612 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7613 Assert(uVector == X86_XCPT_DB);
7614 RT_FALL_THRU();
7615 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7616 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7617 RT_FALL_THRU();
7618 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7619 {
7620 NOREF(uVector);
7621 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
7622 | HMVMX_READ_EXIT_INSTR_LEN
7623 | HMVMX_READ_IDT_VECTORING_INFO
7624 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
7625 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7626 break;
7627 }
7628
7629 default:
7630 {
7631 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7632 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7633 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7634 break;
7635 }
7636 }
7637
7638 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7639 return rcStrict;
7640}
7641
7642
7643/**
7644 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7645 */
7646HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7647{
7648 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7649
7650 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, it is now ready. */
7651 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7652 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7653
7654 /* Evaluate and deliver pending events and resume guest execution. */
7655 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7656 return VINF_SUCCESS;
7657}
7658
7659
7660/**
7661 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7662 */
7663HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7664{
7665 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7666
7667 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7668 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7669 {
7670 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7671 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7672 }
7673
7674 Assert(!CPUMIsGuestNmiBlocking(pVCpu));
7675
7676 /*
7677 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7678 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7679 */
7680 uint32_t fIntrState;
7681 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7682 AssertRC(rc);
7683 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7684 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7685 {
7686 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
7687 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
7688
7689 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7690 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7691 AssertRC(rc);
7692 }
7693
7694 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs, it is now ready */
7695 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7696
7697 /* Evaluate and deliver pending events and resume guest execution. */
7698 return VINF_SUCCESS;
7699}
7700
7701
7702/**
7703 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7704 */
7705HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7706{
7707 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7708 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7709}
7710
7711
7712/**
7713 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7714 */
7715HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7716{
7717 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7718 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7719}
7720
7721
7722/**
7723 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7724 */
7725HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7726{
7727 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7728
7729 /*
7730 * Get the state we need and update the exit history entry.
7731 */
7732 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7733 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7734 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7735 AssertRCReturn(rc, rc);
7736
7737 VBOXSTRICTRC rcStrict;
7738 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7739 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7740 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
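    /* Note: the exit history is keyed on the flat PC (CS base + RIP), so the EMHistoryExec
       probing path below is only taken when the same CPUID site is hit frequently or has
       been flagged for probing. */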
7741 if (!pExitRec)
7742 {
7743 /*
7744 * Regular CPUID instruction execution.
7745 */
7746 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7747 if (rcStrict == VINF_SUCCESS)
7748 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7749 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7750 {
7751 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7752 rcStrict = VINF_SUCCESS;
7753 }
7754 }
7755 else
7756 {
7757 /*
7758 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7759 */
7760 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7761 IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7762 AssertRCReturn(rc2, rc2);
7763
7764 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7765 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7766
7767 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7768 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7769
7770 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7771 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7772 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7773 }
7774 return rcStrict;
7775}
7776
7777
7778/**
7779 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7780 */
7781HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7782{
7783 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7784
7785 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7786 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7787 AssertRCReturn(rc, rc);
7788
7789 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7790 return VINF_EM_RAW_EMULATE_INSTR;
7791
7792 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7793 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7794}
7795
7796
7797/**
7798 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7799 */
7800HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7801{
7802 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7803
7804 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7805 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7806 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7807 AssertRCReturn(rc, rc);
7808
7809 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7810 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7811 {
7812 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7813 we must reset offsetting on VM-entry. See @bugref{6634}. */
7814 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7815 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7816 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7817 }
7818 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7819 {
7820 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7821 rcStrict = VINF_SUCCESS;
7822 }
7823 return rcStrict;
7824}
7825
7826
7827/**
7828 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7829 */
7830HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7831{
7832 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7833
7834 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7835 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7836 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX>(pVCpu, pVmcsInfo, __FUNCTION__);
7837 AssertRCReturn(rc, rc);
7838
7839 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7840 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7841 {
7842 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7843 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7844 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7845 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7846 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7847 }
7848 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7849 {
7850 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7851 rcStrict = VINF_SUCCESS;
7852 }
7853 return rcStrict;
7854}
7855
7856
7857/**
7858 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7859 */
7860HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7861{
7862 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7863
7864 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7865 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
7866 | CPUMCTX_EXTRN_CR0
7867 | CPUMCTX_EXTRN_RFLAGS
7868 | CPUMCTX_EXTRN_RIP
7869 | CPUMCTX_EXTRN_SS>(pVCpu, pVmcsInfo, __FUNCTION__);
7870 AssertRCReturn(rc, rc);
7871
7872 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7873 rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
7874 if (RT_LIKELY(rc == VINF_SUCCESS))
7875 {
7876 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7877 Assert(pVmxTransient->cbExitInstr == 2);
7878 }
7879 else
7880 {
7881 AssertMsgFailed(("vmxHCExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
7882 rc = VERR_EM_INTERPRETER;
7883 }
7884 return rc;
7885}
7886
7887
7888/**
7889 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7890 */
7891HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7892{
7893 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7894
7895 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7896 if (EMAreHypercallInstructionsEnabled(pVCpu))
7897 {
7898 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7899 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RIP
7900 | CPUMCTX_EXTRN_RFLAGS
7901 | CPUMCTX_EXTRN_CR0
7902 | CPUMCTX_EXTRN_SS
7903 | CPUMCTX_EXTRN_CS
7904 | CPUMCTX_EXTRN_EFER>(pVCpu, pVmcsInfo, __FUNCTION__);
7905 AssertRCReturn(rc, rc);
7906
7907 /* Perform the hypercall. */
7908 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7909 if (rcStrict == VINF_SUCCESS)
7910 {
7911 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7912 AssertRCReturn(rc, rc);
7913 }
7914 else
7915 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7916 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7917 || RT_FAILURE(rcStrict));
7918
7919 /* If the hypercall changes anything other than guest's general-purpose registers,
7920 we would need to reload the guest changed bits here before VM-entry. */
7921 }
7922 else
7923 Log4Func(("Hypercalls not enabled\n"));
7924
7925 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7926 if (RT_FAILURE(rcStrict))
7927 {
7928 vmxHCSetPendingXcptUD(pVCpu);
7929 rcStrict = VINF_SUCCESS;
7930 }
7931
7932 return rcStrict;
7933}
7934
7935
7936/**
7937 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7938 */
7939HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7940{
7941 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7942#ifndef IN_NEM_DARWIN
7943 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7944#endif
7945
7946 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7947 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
7948 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7949 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7950 AssertRCReturn(rc, rc);
7951
7952 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7953
7954 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7955 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7956 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7957 {
7958 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7959 rcStrict = VINF_SUCCESS;
7960 }
7961 else
7962 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7963 VBOXSTRICTRC_VAL(rcStrict)));
7964 return rcStrict;
7965}
7966
7967
7968/**
7969 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7970 */
7971HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7972{
7973 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7974
7975 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7976 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7977 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS>(pVCpu, pVmcsInfo, __FUNCTION__);
7978 AssertRCReturn(rc, rc);
7979
7980 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7981 if (rcStrict == VINF_SUCCESS)
7982 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7983 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7984 {
7985 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7986 rcStrict = VINF_SUCCESS;
7987 }
7988
7989 return rcStrict;
7990}
7991
7992
7993/**
7994 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7995 */
7996HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7997{
7998 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7999
8000 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8001 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8002 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8003 AssertRCReturn(rc, rc);
8004
8005 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
8006 if (RT_SUCCESS(rcStrict))
8007 {
8008 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8009 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
8010 rcStrict = VINF_SUCCESS;
8011 }
8012
8013 return rcStrict;
8014}
8015
8016
8017/**
8018 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
8019 * VM-exit.
8020 */
8021HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8022{
8023 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8024 return VINF_EM_RESET;
8025}
8026
8027
8028/**
8029 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
8030 */
8031HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8032{
8033 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8034
8035 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8036 AssertRCReturn(rc, rc);
8037
8038 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
8039 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
8040 rc = VINF_SUCCESS;
8041 else
8042 rc = VINF_EM_HALT;
8043
8044 if (rc != VINF_SUCCESS)
8045 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
8046 return rc;
8047}
8048
8049
8050#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8051/**
8052 * VM-exit handler for instructions that result in a \#UD exception delivered to
8053 * the guest.
8054 */
8055HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8056{
8057 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8058 vmxHCSetPendingXcptUD(pVCpu);
8059 return VINF_SUCCESS;
8060}
8061#endif
8062
8063
8064/**
8065 * VM-exit handler for expiry of the VMX-preemption timer.
8066 */
8067HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8068{
8069 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8070
8071 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
8072 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8073 Log12(("vmxHCExitPreemptTimer:\n"));
8074
8075 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
8076 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8077 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
8078 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
8079 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
8080}
8081
8082
8083/**
8084 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
8085 */
8086HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8087{
8088 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8089
8090 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8091 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8092 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
8093 AssertRCReturn(rc, rc);
8094
8095 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
8096 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
8097 : HM_CHANGED_RAISED_XCPT_MASK);
8098
8099#ifndef IN_NEM_DARWIN
8100 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8101 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
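    /* In other words: XCR0 only needs to be swapped around VM-entry/exit when the guest has
       enabled XSAVE (CR4.OSXSAVE) and its XCR0 actually differs from the host's current value. */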
8102 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
8103 {
8104 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
8105 hmR0VmxUpdateStartVmFunction(pVCpu);
8106 }
8107#endif
8108
8109 return rcStrict;
8110}
8111
8112
8113/**
8114 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
8115 */
8116HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8117{
8118 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8119
8120 /** @todo Enable the new code after finding a reliable guest test-case. */
8121#if 1
8122 return VERR_EM_INTERPRETER;
8123#else
8124 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8125 | HMVMX_READ_EXIT_INSTR_INFO
8126 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8127 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8128 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8129 AssertRCReturn(rc, rc);
8130
8131 /* Paranoia. Ensure this has a memory operand. */
8132 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
8133
8134 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
8135 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
8136 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
8137 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
8138
8139 RTGCPTR GCPtrDesc;
8140 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
8141
8142 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
8143 GCPtrDesc, uType);
8144 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8145 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8146 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8147 {
8148 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8149 rcStrict = VINF_SUCCESS;
8150 }
8151 return rcStrict;
8152#endif
8153}
8154
8155
8156/**
8157 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
8158 * VM-exit.
8159 */
8160HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8161{
8162 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8163 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8164 AssertRCReturn(rc, rc);
8165
8166 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
8167 if (RT_FAILURE(rc))
8168 return rc;
8169
8170 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
8171 NOREF(uInvalidReason);
8172
8173#ifdef VBOX_STRICT
8174 uint32_t fIntrState;
8175 uint64_t u64Val;
8176 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_INFO
8177 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8178 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
8179
8180 Log4(("uInvalidReason %u\n", uInvalidReason));
8181 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
8182 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
8183 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
8184
8185 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
8186 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
8187 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
8188 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
8189 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
8190 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
8191 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
8192 Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
8193 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
8194 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
8195 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
8196 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8197# ifndef IN_NEM_DARWIN
8198 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
8199 {
8200 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
8201 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
8202 }
8203
8204 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
8205# endif
8206#endif
8207
8208 return VERR_VMX_INVALID_GUEST_STATE;
8209}
8210
8211/**
8212 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
8213 */
8214HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8215{
8216 /*
8217 * Cumulative notes of all recognized but unexpected VM-exits.
8218 *
8219 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
8220 * nested-paging is used.
8221 *
8222 * 2. Any instruction that causes a VM-exit unconditionally (for e.g. VMXON) must be
8223 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
8224 * this function (and thereby stop VM execution) for handling such instructions.
8225 *
8226 *
8227 * VMX_EXIT_INIT_SIGNAL:
8228 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
8229 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these
8230 * VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
8231 *
8232 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
8233 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
8234 * See Intel spec. 23.8 "Restrictions on VMX Operation".
8235 *
8236 * VMX_EXIT_SIPI:
8237 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
8238 * activity state is used. We don't make use of it as our guests don't have direct
8239 * access to the host local APIC.
8240 *
8241 * See Intel spec. 25.3 "Other Causes of VM-exits".
8242 *
8243 * VMX_EXIT_IO_SMI:
8244 * VMX_EXIT_SMI:
8245 * This can only happen if we support dual-monitor treatment of SMI, which can be
8246 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
8247 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
8248 * VMX root mode or receive an SMI. If we get here, something funny is going on.
8249 *
8250 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
8251 * See Intel spec. 25.3 "Other Causes of VM-Exits"
8252 *
8253 * VMX_EXIT_ERR_MSR_LOAD:
8254 * Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
8255 * and typically indicate a bug in the hypervisor code. We thus cannot resume guest
8256 * execution.
8257 *
8258 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
8259 *
8260 * VMX_EXIT_ERR_MACHINE_CHECK:
8261 * Machine-check exceptions indicate a fatal/unrecoverable hardware condition,
8262 * including but not limited to system bus, ECC, parity, cache and TLB errors. A
8263 * #MC is an abort-class exception. We thus cannot assume a reasonable chance of
8264 * continuing any sort of execution and bail.
8265 *
8266 * See Intel spec. 15.1 "Machine-check Architecture".
8267 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
8268 *
8269 * VMX_EXIT_PML_FULL:
8270 * VMX_EXIT_VIRTUALIZED_EOI:
8271 * VMX_EXIT_APIC_WRITE:
8272 * We do not currently support any of these features and thus they are all unexpected
8273 * VM-exits.
8274 *
8275 * VMX_EXIT_GDTR_IDTR_ACCESS:
8276 * VMX_EXIT_LDTR_TR_ACCESS:
8277 * VMX_EXIT_RDRAND:
8278 * VMX_EXIT_RSM:
8279 * VMX_EXIT_VMFUNC:
8280 * VMX_EXIT_ENCLS:
8281 * VMX_EXIT_RDSEED:
8282 * VMX_EXIT_XSAVES:
8283 * VMX_EXIT_XRSTORS:
8284 * VMX_EXIT_UMWAIT:
8285 * VMX_EXIT_TPAUSE:
8286 * VMX_EXIT_LOADIWKEY:
8287 * These VM-exits are -not- caused unconditionally by execution of the corresponding
8288 * instruction. Any VM-exit for these instructions indicates a hardware problem,
8289 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
8290 *
8291 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
8292 */
8293 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8294 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
8295 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8296}
8297
8298
8299/**
8300 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
8301 */
8302HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8303{
8304 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8305
8306 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8307
8308 /** @todo Optimize this: We currently drag in the whole MSR state
8309 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8310 * MSRs required. That would require changes to IEM and possibly CPUM too.
8311 * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */
8312 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8313 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8314 int rc;
8315 switch (idMsr)
8316 {
8317 default:
8318 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8319 __FUNCTION__);
8320 AssertRCReturn(rc, rc);
8321 break;
8322 case MSR_K8_FS_BASE:
8323 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8324 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8325 AssertRCReturn(rc, rc);
8326 break;
8327 case MSR_K8_GS_BASE:
8328 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8329 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8330 AssertRCReturn(rc, rc);
8331 break;
8332 }
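    /* Note: the FS/GS base cases above additionally import the corresponding segment register,
       since reading MSR_K8_FS_BASE/MSR_K8_GS_BASE must return the current segment base; see the
       related note in vmxHCExitWrmsr() below. */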
8333
8334 Log4Func(("ecx=%#RX32\n", idMsr));
8335
8336#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8337 Assert(!pVmxTransient->fIsNestedGuest);
8338 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
8339 {
8340 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
8341 && idMsr != MSR_K6_EFER)
8342 {
8343 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
8344 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8345 }
8346 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8347 {
8348 Assert(pVmcsInfo->pvMsrBitmap);
8349 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8350 if (fMsrpm & VMXMSRPM_ALLOW_RD)
8351 {
8352 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
8353 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8354 }
8355 }
8356 }
8357#endif
8358
8359 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
8360 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
8361 if (rcStrict == VINF_SUCCESS)
8362 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8363 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8364 {
8365 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8366 rcStrict = VINF_SUCCESS;
8367 }
8368 else
8369 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
8370 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8371
8372 return rcStrict;
8373}
8374
8375
8376/**
8377 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
8378 */
8379HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8380{
8381 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8382
8383 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8384
8385 /*
8386 * The FS and GS base MSRs are not part of the above all-MSRs mask.
8387 * Although we don't need to fetch the base itself (it will be overwritten shortly),
8388 * loading the guest state would also load the entire segment register, including the
8389 * limit and attributes, and thus we need to import them here.
8390 */
8391 /** @todo Optimize this: We currently drag in the whole MSR state
8392 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8393 * MSRs required. That would require changes to IEM and possibly CPUM too.
8394 * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */
8395 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8396 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8397 int rc;
8398 switch (idMsr)
8399 {
8400 default:
8401 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8402 __FUNCTION__);
8403 AssertRCReturn(rc, rc);
8404 break;
8405
8406 case MSR_K8_FS_BASE:
8407 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8408 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8409 AssertRCReturn(rc, rc);
8410 break;
8411 case MSR_K8_GS_BASE:
8412 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8413 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8414 AssertRCReturn(rc, rc);
8415 break;
8416 }
8417 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
8418
8419 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
8420 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
8421
8422 if (rcStrict == VINF_SUCCESS)
8423 {
8424 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8425
8426 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
8427 if ( idMsr == MSR_IA32_APICBASE
8428 || ( idMsr >= MSR_IA32_X2APIC_START
8429 && idMsr <= MSR_IA32_X2APIC_END))
8430 {
8431 /*
8432 * We've already saved the APIC related guest-state (TPR) in post-run phase.
8433 * When full APIC register virtualization is implemented we'll have to make
8434 * sure APIC state is saved from the VMCS before IEM changes it.
8435 */
8436 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
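            /* The x2APIC registers are mapped into the MSR space starting at MSR_IA32_X2APIC_START
               (architecturally 0x800), so the check above covers both the legacy APIC-base MSR (via
               the explicit compare) and all x2APIC register writes (via the range). */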
8437 }
8438 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
8439 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8440 else if (idMsr == MSR_K6_EFER)
8441 {
8442 /*
8443 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
8444 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
8445 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
8446 */
8447 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
8448 }
8449
8450 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
8451 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
8452 {
8453 switch (idMsr)
8454 {
8455 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
8456 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
8457 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
8458 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
8459 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
8460 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
8461 default:
8462 {
8463#ifndef IN_NEM_DARWIN
8464 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8465 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
8466 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8467 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
8468#else
8469 AssertMsgFailed(("TODO\n"));
8470#endif
8471 break;
8472 }
8473 }
8474 }
8475#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8476 else
8477 {
8478 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
8479 switch (idMsr)
8480 {
8481 case MSR_IA32_SYSENTER_CS:
8482 case MSR_IA32_SYSENTER_EIP:
8483 case MSR_IA32_SYSENTER_ESP:
8484 case MSR_K8_FS_BASE:
8485 case MSR_K8_GS_BASE:
8486 {
8487 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
8488 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8489 }
8490
8491 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
8492 default:
8493 {
8494 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8495 {
8496 /* EFER MSR writes are always intercepted. */
8497 if (idMsr != MSR_K6_EFER)
8498 {
8499 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
8500 idMsr));
8501 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8502 }
8503 }
8504
8505 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8506 {
8507 Assert(pVmcsInfo->pvMsrBitmap);
8508 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8509 if (fMsrpm & VMXMSRPM_ALLOW_WR)
8510 {
8511 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
8512 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8513 }
8514 }
8515 break;
8516 }
8517 }
8518 }
8519#endif /* VBOX_STRICT && !IN_NEM_DARWIN */
8520 }
8521 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8522 {
8523 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8524 rcStrict = VINF_SUCCESS;
8525 }
8526 else
8527 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
8528 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8529
8530 return rcStrict;
8531}
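
#if 0
/*
 * Purely illustrative sketch, never built: roughly how a VMX MSR write-bitmap lookup works,
 * assuming the standard Intel layout (read bitmaps for low/high MSRs at offsets 0 and 1024,
 * write bitmaps at 2048 and 3072, one bit per MSR, where a set bit makes the access cause a
 * VM-exit).  The WRMSR handler above relies on CPUMGetVmxMsrPermission() and friends instead;
 * this helper and its name are made up for illustration only.
 */
static bool vmxSketchDoesWrmsrCauseExit(uint8_t const *pbMsrBitmap, uint32_t idMsr)
{
    uint32_t offBitmap;
    if (idMsr <= UINT32_C(0x00001fff))
        offBitmap = 2048;                       /* Write bitmap for low MSRs (0x00000000..0x00001fff). */
    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
    {
        offBitmap = 3072;                       /* Write bitmap for high MSRs (0xc0000000..0xc0001fff). */
        idMsr    -= UINT32_C(0xc0000000);
    }
    else
        return true;                            /* MSRs outside both ranges always cause VM-exits. */
    return RT_BOOL(pbMsrBitmap[offBitmap + (idMsr >> 3)] & RT_BIT_32(idMsr & 7));
}
#endif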
8532
8533
8534/**
8535 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8536 */
8537HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8538{
8539 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8540
8541    /** @todo The guest has likely hit a contended spinlock. We might want to
8542     *        poke or schedule a different guest VCPU. */
8543 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8544 if (RT_SUCCESS(rc))
8545 return VINF_EM_RAW_INTERRUPT;
8546
8547 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8548 return rc;
8549}
8550
8551
8552/**
8553 * VM-exit handler for when the TPR value is lowered below the specified
8554 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8555 */
8556HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8557{
8558 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8559 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8560
8561 /*
8562 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8563 * We'll re-evaluate pending interrupts and inject them before the next VM
8564 * entry so we can just continue execution here.
8565 */
8566 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8567 return VINF_SUCCESS;
8568}
8569
8570
8571/**
8572 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8573 * VM-exit.
8574 *
8575 * @retval VINF_SUCCESS when guest execution can continue.
8576 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8577 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8578 * incompatible guest state for VMX execution (real-on-v86 case).
8579 */
8580HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8581{
8582 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8583 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8584
8585 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8586 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8587 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8588
8589 VBOXSTRICTRC rcStrict;
8590 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8591 uint64_t const uExitQual = pVmxTransient->uExitQual;
8592 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
8593 switch (uAccessType)
8594 {
8595 /*
8596 * MOV to CRx.
8597 */
8598 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8599 {
8600 /*
8601 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8602             * changes certain bits even in CR0 and CR4 (and not just CR3). We are currently fine
8603 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8604 * PAE PDPTEs as well.
8605 */
8606 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8607 AssertRCReturn(rc, rc);
8608
8609 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8610#ifndef IN_NEM_DARWIN
8611 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8612#endif
8613 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8614 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8615
8616 /*
8617             * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8618 * - When nested paging isn't used.
8619 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8620 * - We are executing in the VM debug loop.
8621 */
8622#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8623# ifndef IN_NEM_DARWIN
8624 Assert( iCrReg != 3
8625 || !VM_IS_VMX_NESTED_PAGING(pVM)
8626 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8627 || pVCpu->hmr0.s.fUsingDebugLoop);
8628# else
8629 Assert( iCrReg != 3
8630 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8631# endif
8632#endif
8633
8634            /* MOV to CR8 only causes a VM-exit when the TPR shadow is not used. */
8635 Assert( iCrReg != 8
8636 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8637
8638 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8639 AssertMsg( rcStrict == VINF_SUCCESS
8640 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8641
8642#ifndef IN_NEM_DARWIN
8643 /*
8644 * This is a kludge for handling switches back to real mode when we try to use
8645 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8646 * deal with special selector values, so we have to return to ring-3 and run
8647 * there till the selector values are V86 mode compatible.
8648 *
8649 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8650 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8651 * this function.
8652 */
8653 if ( iCrReg == 0
8654 && rcStrict == VINF_SUCCESS
8655 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8656 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8657 && (uOldCr0 & X86_CR0_PE)
8658 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8659 {
8660 /** @todo Check selectors rather than returning all the time. */
8661 Assert(!pVmxTransient->fIsNestedGuest);
8662 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8663 rcStrict = VINF_EM_RESCHEDULE_REM;
8664 }
8665#endif
8666
8667 break;
8668 }
8669
8670 /*
8671 * MOV from CRx.
8672 */
8673 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8674 {
8675 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8676 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8677
8678 /*
8679             * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8680 * - When nested paging isn't used.
8681 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8682 * - We are executing in the VM debug loop.
8683 */
8684#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8685# ifndef IN_NEM_DARWIN
8686 Assert( iCrReg != 3
8687 || !VM_IS_VMX_NESTED_PAGING(pVM)
8688 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8689 || pVCpu->hmr0.s.fLeaveDone);
8690# else
8691 Assert( iCrReg != 3
8692 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8693# endif
8694#endif
8695
8696            /* MOV from CR8 only causes a VM-exit when the TPR shadow feature isn't enabled. */
8697 Assert( iCrReg != 8
8698 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8699
8700 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8701 break;
8702 }
8703
8704 /*
8705 * CLTS (Clear Task-Switch Flag in CR0).
8706 */
8707 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8708 {
8709 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8710 break;
8711 }
8712
8713 /*
8714 * LMSW (Load Machine-Status Word into CR0).
8715 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8716 */
8717 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8718 {
8719 RTGCPTR GCPtrEffDst;
8720 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8721 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8722 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8723 if (fMemOperand)
8724 {
8725 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
8726 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8727 }
8728 else
8729 GCPtrEffDst = NIL_RTGCPTR;
8730 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8731 break;
8732 }
8733
8734 default:
8735 {
8736 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8737 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8738 }
8739 }
8740
8741 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8742 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8743 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8744
8745 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8746 NOREF(pVM);
8747 return rcStrict;
8748}
8749
8750
8751/**
8752 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8753 * VM-exit.
8754 */
8755HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8756{
8757 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8758 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8759
8760 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8761 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8762 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8763 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8764#define VMX_HC_EXIT_IO_INSTR_INITIAL_REGS (IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER)
8765    /* The EFER MSR is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8766 int rc = vmxHCImportGuestState<VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8767 AssertRCReturn(rc, rc);
8768
8769    /* Refer to Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
8770 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8771 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8772 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8773 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8774 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8775 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8776 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
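    /* Note: size-field encodings 0, 1 and 3 mean 1, 2 and 4 byte accesses respectively; 2 is not a
       defined encoding, hence the assertion above and the zero entries in the lookup tables below. */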
8777
8778 /*
8779 * Update exit history to see if this exit can be optimized.
8780 */
8781 VBOXSTRICTRC rcStrict;
8782 PCEMEXITREC pExitRec = NULL;
8783 if ( !fGstStepping
8784 && !fDbgStepping)
8785 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8786 !fIOString
8787 ? !fIOWrite
8788 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8789 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8790 : !fIOWrite
8791 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8792 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8793 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8794 if (!pExitRec)
8795 {
8796 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8797 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
8798
8799 uint32_t const cbValue = s_aIOSizes[uIOSize];
8800 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8801 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8802 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8803 if (fIOString)
8804 {
8805 /*
8806 * INS/OUTS - I/O String instruction.
8807 *
8808 * Use instruction-information if available, otherwise fall back on
8809 * interpreting the instruction.
8810 */
8811 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8812 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8813 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8814 if (fInsOutsInfo)
8815 {
8816 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
8817 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8818 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8819 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8820 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8821 if (fIOWrite)
8822 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8823 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8824 else
8825 {
8826 /*
8827 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8828 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8829 * See Intel Instruction spec. for "INS".
8830 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8831 */
8832 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8833 }
8834 }
8835 else
8836 rcStrict = IEMExecOne(pVCpu);
8837
8838 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8839 fUpdateRipAlready = true;
8840 }
8841 else
8842 {
8843 /*
8844 * IN/OUT - I/O instruction.
8845 */
8846 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8847 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8848 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8849 if (fIOWrite)
8850 {
8851 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8852 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8853#ifndef IN_NEM_DARWIN
8854 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8855 && !pCtx->eflags.Bits.u1TF)
8856 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8857#endif
8858 }
8859 else
8860 {
8861 uint32_t u32Result = 0;
8862 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8863 if (IOM_SUCCESS(rcStrict))
8864 {
8865 /* Save result of I/O IN instr. in AL/AX/EAX. */
8866 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8867 }
8868#ifndef IN_NEM_DARWIN
8869 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8870 && !pCtx->eflags.Bits.u1TF)
8871 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8872#endif
8873 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8874 }
8875 }
8876
8877 if (IOM_SUCCESS(rcStrict))
8878 {
8879 if (!fUpdateRipAlready)
8880 {
8881 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8882 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8883 }
8884
8885 /*
8886         * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault guru
8887         * meditation while booting a Fedora 17 64-bit guest.
8888 *
8889 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8890 */
8891 if (fIOString)
8892 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8893
8894 /*
8895 * If any I/O breakpoints are armed, we need to check if one triggered
8896 * and take appropriate action.
8897 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8898 */
8899#if 1
8900 AssertCompile(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7);
8901#else
8902 AssertCompile(!(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7));
8903 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo);
8904 AssertRCReturn(rc, rc);
8905#endif
8906
8907 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8908 * execution engines about whether hyper BPs and such are pending. */
8909 uint32_t const uDr7 = pCtx->dr[7];
8910 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8911 && X86_DR7_ANY_RW_IO(uDr7)
8912 && (pCtx->cr4 & X86_CR4_DE))
8913 || DBGFBpIsHwIoArmed(pVM)))
8914 {
8915 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8916
8917#ifndef IN_NEM_DARWIN
8918 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8919 VMMRZCallRing3Disable(pVCpu);
8920 HM_DISABLE_PREEMPT(pVCpu);
8921
8922 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8923
8924 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8925 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8926 {
8927 /* Raise #DB. */
8928 if (fIsGuestDbgActive)
8929 ASMSetDR6(pCtx->dr[6]);
8930 if (pCtx->dr[7] != uDr7)
8931 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8932
8933 vmxHCSetPendingXcptDB(pVCpu);
8934 }
8935            /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST];
8936               however, we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8937 else if ( rcStrict2 != VINF_SUCCESS
8938 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8939 rcStrict = rcStrict2;
8940 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8941
8942 HM_RESTORE_PREEMPT();
8943 VMMRZCallRing3Enable(pVCpu);
8944#else
8945 /** @todo */
8946#endif
8947 }
8948 }
8949
8950#ifdef VBOX_STRICT
8951 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8952 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8953 Assert(!fIOWrite);
8954 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8955 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8956 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8957 Assert(fIOWrite);
8958 else
8959 {
8960# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8961 * statuses, that the VMM device and some others may return. See
8962 * IOM_SUCCESS() for guidance. */
8963 AssertMsg( RT_FAILURE(rcStrict)
8964 || rcStrict == VINF_SUCCESS
8965 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8966 || rcStrict == VINF_EM_DBG_BREAKPOINT
8967 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8968 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8969# endif
8970 }
8971#endif
8972 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8973 }
8974 else
8975 {
8976 /*
8977 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8978 */
8979 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
8980 VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8981 AssertRCReturn(rc2, rc2);
8982 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8983 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8984 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8985 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8986 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8987 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8988
8989 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8990 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8991
8992 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8993 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8994 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8995 }
8996 return rcStrict;
8997}
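
#if 0
/*
 * Purely illustrative sketch, never built: decoding the I/O-instruction exit qualification
 * by hand, using the field layout from the Intel spec table referenced in the handler above.
 * The real code uses the VMX_EXIT_QUAL_IO_* macros; the type and function names here are
 * made up for illustration only.
 */
typedef struct VMXSKETCHIOQUAL
{
    uint8_t  cbAccess;      /**< Access size: 1, 2 or 4 bytes. */
    bool     fIn;           /**< true for IN/INS, false for OUT/OUTS. */
    bool     fString;       /**< INS/OUTS. */
    bool     fRep;          /**< REP prefixed. */
    uint16_t uPort;         /**< I/O port number. */
} VMXSKETCHIOQUAL;

static VMXSKETCHIOQUAL vmxSketchDecodeIoExitQual(uint64_t uExitQual)
{
    static uint8_t const s_acbAccess[8] = { 1, 2, 0, 4, 0, 0, 0, 0 };   /* Size field: 0=1, 1=2, 3=4 bytes; other values undefined. */
    VMXSKETCHIOQUAL Decoded;
    Decoded.cbAccess = s_acbAccess[uExitQual & 7];              /* Bits 2:0   - size of access. */
    Decoded.fIn      = RT_BOOL(uExitQual & RT_BIT_64(3));       /* Bit  3     - direction (1 = IN). */
    Decoded.fString  = RT_BOOL(uExitQual & RT_BIT_64(4));       /* Bit  4     - string instruction (INS/OUTS). */
    Decoded.fRep     = RT_BOOL(uExitQual & RT_BIT_64(5));       /* Bit  5     - REP prefix. */
    Decoded.uPort    = (uint16_t)(uExitQual >> 16);             /* Bits 31:16 - port number. */
    return Decoded;
}
#endif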
8998
8999
9000/**
9001 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
9002 * VM-exit.
9003 */
9004HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9005{
9006 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9007
9008    /* Check if this task-switch occurred while delivering an event through the guest IDT. */
9009 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9010 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
9011 {
9012 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_INFO>(pVCpu, pVmxTransient);
9013 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9014 {
9015 uint32_t uErrCode;
9016 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
9017 {
9018 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9019 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
9020 }
9021 else
9022 uErrCode = 0;
9023
9024 RTGCUINTPTR GCPtrFaultAddress;
9025 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
9026 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
9027 else
9028 GCPtrFaultAddress = 0;
9029
9030 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9031
9032 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
9033 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
9034
9035 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
9036 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
9037 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9038 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9039 }
9040 }
9041
9042 /* Fall back to the interpreter to emulate the task-switch. */
9043 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9044 return VERR_EM_INTERPRETER;
9045}
9046
9047
9048/**
9049 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
9050 */
9051HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9052{
9053 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9054
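    /* The monitor-trap flag is presumably only armed by our debug-loop single-stepping code,
       so simply disarm it and hand control back to the debugger. */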
9055 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9056 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
9057 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9058 AssertRC(rc);
9059 return VINF_EM_DBG_STEPPED;
9060}
9061
9062
9063/**
9064 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
9065 */
9066HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9067{
9068 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9069 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
9070
9071 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9072 | HMVMX_READ_EXIT_INSTR_LEN
9073 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9074 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9075 | HMVMX_READ_IDT_VECTORING_INFO
9076 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9077
9078 /*
9079 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9080 */
9081 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9082 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9083 {
9084        /* For some crazy guests, if event delivery causes an APIC-access VM-exit, go to instruction emulation. */
9085 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
9086 {
9087 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9088 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9089 }
9090 }
9091 else
9092 {
9093 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9094 return rcStrict;
9095 }
9096
9097    /* IOMR0MmioPhysHandler() below may call into IEM; save the necessary state. */
9098 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9099 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9100 AssertRCReturn(rc, rc);
9101
9102    /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses". */
9103 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
9104 switch (uAccessType)
9105 {
9106#ifndef IN_NEM_DARWIN
9107 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
9108 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
9109 {
9110 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
9111 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
9112 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
9113
9114 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
9115 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
9116 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
9117 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
9118 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
9119
9120 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
9121 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
9122 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9123 if ( rcStrict == VINF_SUCCESS
9124 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9125 || rcStrict == VERR_PAGE_NOT_PRESENT)
9126 {
9127 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9128 | HM_CHANGED_GUEST_APIC_TPR);
9129 rcStrict = VINF_SUCCESS;
9130 }
9131 break;
9132 }
9133#else
9134 /** @todo */
9135#endif
9136
9137 default:
9138 {
9139 Log4Func(("uAccessType=%#x\n", uAccessType));
9140 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
9141 break;
9142 }
9143 }
9144
9145 if (rcStrict != VINF_SUCCESS)
9146 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
9147 return rcStrict;
9148}
9149
9150
9151/**
9152 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
9153 * VM-exit.
9154 */
9155HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9156{
9157 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9158 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9159
9160 /*
9161 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
9162 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
9163 * must emulate the MOV DRx access.
9164 */
9165 if (!pVmxTransient->fIsNestedGuest)
9166 {
9167 /* We should -not- get this VM-exit if the guest's debug registers were active. */
9168 if (pVmxTransient->fWasGuestDebugStateActive)
9169 {
9170 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
9171 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
9172 }
9173
9174 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
9175 && !pVmxTransient->fWasHyperDebugStateActive)
9176 {
9177 Assert(!DBGFIsStepping(pVCpu));
9178 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
9179
9180 /* Don't intercept MOV DRx any more. */
9181 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
9182 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9183 AssertRC(rc);
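            /* From here on the guest owns the debug registers; further MOV DRx accesses no longer cause VM-exits. */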
9184
9185#ifndef IN_NEM_DARWIN
9186 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
9187 VMMRZCallRing3Disable(pVCpu);
9188 HM_DISABLE_PREEMPT(pVCpu);
9189
9190 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
9191 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
9192 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9193
9194 HM_RESTORE_PREEMPT();
9195 VMMRZCallRing3Enable(pVCpu);
9196#else
9197 CPUMR3NemActivateGuestDebugState(pVCpu);
9198 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9199 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
9200#endif
9201
9202#ifdef VBOX_WITH_STATISTICS
9203 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9204 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9205 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9206 else
9207 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9208#endif
9209 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
9210 return VINF_SUCCESS;
9211 }
9212 }
9213
9214 /*
9215     * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires the EFER MSR and CS.
9216 * The EFER MSR is always up-to-date.
9217 * Update the segment registers and DR7 from the CPU.
9218 */
9219 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9220 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9221 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo, __FUNCTION__);
9222 AssertRCReturn(rc, rc);
9223 Log4Func(("cs:rip=%#04x:%08RX64\n", pCtx->cs.Sel, pCtx->rip));
9224
9225 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9226 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9227 {
9228 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
9229 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
9230 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
9231 if (RT_SUCCESS(rc))
9232 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
9233 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9234 }
9235 else
9236 {
9237 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
9238 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
9239 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
9240 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9241 }
9242
9243 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
9244 if (RT_SUCCESS(rc))
9245 {
9246 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
9247 AssertRCReturn(rc2, rc2);
9248 return VINF_SUCCESS;
9249 }
9250 return rc;
9251}
9252
9253
9254/**
9255 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
9256 * Conditional VM-exit.
9257 */
9258HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9259{
9260 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9261
9262#ifndef IN_NEM_DARWIN
9263 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9264
9265 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_LEN
9266 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9267 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9268 | HMVMX_READ_IDT_VECTORING_INFO
9269 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9270 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9271
9272 /*
9273 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9274 */
9275 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9276 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9277 {
9278 /*
9279 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
9280 * instruction emulation to inject the original event. Otherwise, injecting the original event
9281 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
9282 */
9283 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9284 { /* likely */ }
9285 else
9286 {
9287 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9288# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9289 /** @todo NSTVMX: Think about how this should be handled. */
9290 if (pVmxTransient->fIsNestedGuest)
9291 return VERR_VMX_IPE_3;
9292# endif
9293 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9294 }
9295 }
9296 else
9297 {
9298 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9299 return rcStrict;
9300 }
9301
9302 /*
9303 * Get sufficient state and update the exit history entry.
9304 */
9305 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9306 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9307 AssertRCReturn(rc, rc);
9308
9309 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9310 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
9311 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
9312 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
9313 if (!pExitRec)
9314 {
9315 /*
9316 * If we succeed, resume guest execution.
9317 * If we fail in interpreting the instruction because we couldn't get the guest physical address
9318 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
9319         * in the host TLB), resume execution, which would cause a guest page fault and let the guest handle this
9320 * weird case. See @bugref{6043}.
9321 */
9322 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9323 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9324/** @todo bird: We can probably just go straight to IOM here and assume that
9325 * it's MMIO, then fall back on PGM if that hunch didn't work out so
9326 * well. However, we need to address that aliasing workarounds that
9327 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
9328 *
9329 * Might also be interesting to see if we can get this done more or
9330 * less locklessly inside IOM. Need to consider the lookup table
9331 * updating and use a bit more carefully first (or do all updates via
9332 * rendezvous) */
9333 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
9334 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
9335 if ( rcStrict == VINF_SUCCESS
9336 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9337 || rcStrict == VERR_PAGE_NOT_PRESENT)
9338 {
9339 /* Successfully handled MMIO operation. */
9340 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9341 | HM_CHANGED_GUEST_APIC_TPR);
9342 rcStrict = VINF_SUCCESS;
9343 }
9344 }
9345 else
9346 {
9347 /*
9348 * Frequent exit or something needing probing. Call EMHistoryExec.
9349 */
9350 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
9351 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
9352
9353 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9354 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9355
9356 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9357 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9358 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9359 }
9360 return rcStrict;
9361#else
9362 AssertFailed();
9363 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
9364#endif
9365}
9366
9367
9368/**
9369 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
9370 * VM-exit.
9371 */
9372HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9373{
9374 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9375#ifndef IN_NEM_DARWIN
9376 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9377
9378 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9379 | HMVMX_READ_EXIT_INSTR_LEN
9380 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9381 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9382 | HMVMX_READ_IDT_VECTORING_INFO
9383 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9384 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9385
9386 /*
9387 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9388 */
9389 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9390 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9391 {
9392 /*
9393 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
9394 * we shall resolve the nested #PF and re-inject the original event.
9395 */
9396 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9397 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
9398 }
9399 else
9400 {
9401 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9402 return rcStrict;
9403 }
9404
9405 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9406 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9407 AssertRCReturn(rc, rc);
9408
9409 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9410 uint64_t const uExitQual = pVmxTransient->uExitQual;
9411 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
9412
9413 RTGCUINT uErrorCode = 0;
9414 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
9415 uErrorCode |= X86_TRAP_PF_ID;
9416 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9417 uErrorCode |= X86_TRAP_PF_RW;
9418 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
9419 uErrorCode |= X86_TRAP_PF_P;
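    /* I.e. an instruction fetch maps to the ID bit, a write access to the RW bit, and a violation on a
       present translation (any EPT R/W/X permission bit set in the qualification) to the P bit. */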
9420
9421 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9422 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
9423
9424 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9425
9426 /*
9427     * Handle the page-fault trap for the nested shadow table.
9428 */
9429 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
9430 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
9431 TRPMResetTrap(pVCpu);
9432
9433 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
9434 if ( rcStrict == VINF_SUCCESS
9435 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9436 || rcStrict == VERR_PAGE_NOT_PRESENT)
9437 {
9438 /* Successfully synced our nested page tables. */
9439 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
9440 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
9441 return VINF_SUCCESS;
9442 }
9443 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9444 return rcStrict;
9445
9446#else /* IN_NEM_DARWIN */
9447 PVM pVM = pVCpu->CTX_SUFF(pVM);
9448 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
9449 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9450 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9451 vmxHCImportGuestRip(pVCpu);
9452 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
9453
9454 /*
9455 * Ask PGM for information about the given GCPhys. We need to check if we're
9456 * out of sync first.
9457 */
9458 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE), false, false };
9459 PGMPHYSNEMPAGEINFO Info;
9460 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
9461 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
9462 if (RT_SUCCESS(rc))
9463 {
9464 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9465 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
9466 {
9467 if (State.fCanResume)
9468 {
9469 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
9470 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9471 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9472 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9473 State.fDidSomething ? "" : " no-change"));
9474 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
9475 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9476 return VINF_SUCCESS;
9477 }
9478 }
9479
9480 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
9481 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9482 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9483 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9484 State.fDidSomething ? "" : " no-change"));
9485 }
9486 else
9487 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
9488 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9489 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
9490
9491 /*
9492 * Emulate the memory access, either access handler or special memory.
9493 */
9494 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
9495 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9496 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
9497 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
9498 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9499
9500 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9501 AssertRCReturn(rc, rc);
9502
9503 VBOXSTRICTRC rcStrict;
9504 if (!pExitRec)
9505 rcStrict = IEMExecOne(pVCpu);
9506 else
9507 {
9508 /* Frequent access or probing. */
9509 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9510 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9511 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9512 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9513 }
9514
9515 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9516
9517 Log4Func(("EPT return rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9518 return rcStrict;
9519#endif /* IN_NEM_DARWIN */
9520}
9521
9522#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9523
9524/**
9525 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
9526 */
9527HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9528{
9529 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9530
9531 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9532 | HMVMX_READ_EXIT_INSTR_INFO
9533 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9534 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9535 | CPUMCTX_EXTRN_SREG_MASK
9536 | CPUMCTX_EXTRN_HWVIRT
9537 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9538 AssertRCReturn(rc, rc);
9539
9540 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9541
9542 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9543 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9544
9545 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
9546 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9547 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9548 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9549 {
9550 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9551 rcStrict = VINF_SUCCESS;
9552 }
9553 return rcStrict;
9554}
9555
9556
9557/**
9558 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9559 */
9560HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9561{
9562 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9563
9564    /* Import the entire VMCS state for now as we would be switching VMCS on a successful VMLAUNCH;
9565       otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9566 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9567 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9568 AssertRCReturn(rc, rc);
9569
9570 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9571
9572 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9573 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9574 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9575 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9576 {
9577 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9578 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9579 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9580 }
9581 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9582 return rcStrict;
9583}
9584
9585
9586/**
9587 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9588 */
9589HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9590{
9591 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9592
9593 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9594 | HMVMX_READ_EXIT_INSTR_INFO
9595 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9596 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9597 | CPUMCTX_EXTRN_SREG_MASK
9598 | CPUMCTX_EXTRN_HWVIRT
9599 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9600 AssertRCReturn(rc, rc);
9601
9602 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9603
9604 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9605 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9606
9607 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9608 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9609 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9610 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9611 {
9612 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9613 rcStrict = VINF_SUCCESS;
9614 }
9615 return rcStrict;
9616}
9617
9618
9619/**
9620 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9621 */
9622HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9623{
9624 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9625
9626 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9627 | HMVMX_READ_EXIT_INSTR_INFO
9628 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9629 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9630 | CPUMCTX_EXTRN_SREG_MASK
9631 | CPUMCTX_EXTRN_HWVIRT
9632 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9633 AssertRCReturn(rc, rc);
9634
9635 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9636
9637 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9638 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9639
9640 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9641 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9642 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9643 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9644 {
9645 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9646 rcStrict = VINF_SUCCESS;
9647 }
9648 return rcStrict;
9649}
9650
9651
9652/**
9653 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9654 */
9655HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9656{
9657 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9658
9659 /*
9660 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9661     * thus might not need to import the shadow VMCS state, but it's safer to do so just in
9662     * case code elsewhere dares to look at unsynced VMCS fields.
9663 */
9664 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9665 | HMVMX_READ_EXIT_INSTR_INFO
9666 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9667 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9668 | CPUMCTX_EXTRN_SREG_MASK
9669 | CPUMCTX_EXTRN_HWVIRT
9670 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9671 AssertRCReturn(rc, rc);
9672
9673 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9674
9675 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9676 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9677 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9678
9679 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9680 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9681 {
9682 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9683
9684# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
9685 /* Try for exit optimization. This is on the following instruction
9686 because it would be a waste of time to have to reinterpret the
9687           already decoded vmread instruction. */
9688 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
9689 if (pExitRec)
9690 {
9691 /* Frequent access or probing. */
9692 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9693 AssertRCReturn(rc, rc);
9694
9695 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9696 Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9697 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9698 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9699 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9700 }
9701# endif
9702 }
9703 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9704 {
9705 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9706 rcStrict = VINF_SUCCESS;
9707 }
9708 return rcStrict;
9709}
9710
9711
9712/**
9713 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9714 */
9715HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9716{
9717 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9718
9719    /* Import the entire VMCS state for now as we would be switching VMCS on a successful VMRESUME;
9720       otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9721 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9722 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9723 AssertRCReturn(rc, rc);
9724
9725 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9726
9727 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9728 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9729 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9730 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9731 {
9732 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9733 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9734 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9735 }
9736 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9737 return rcStrict;
9738}
9739
9740
9741/**
9742 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9743 */
9744HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9745{
9746 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9747
9748 /*
9749     * Although we should not get VMWRITE VM-exits for shadow VMCS fields, our HM hook gets
9750     * invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and flags
9751     * re-loading of the entire shadow VMCS, so we should save the entire shadow VMCS here.
9752 */
9753 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9754 | HMVMX_READ_EXIT_INSTR_INFO
9755 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9756 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9757 | CPUMCTX_EXTRN_SREG_MASK
9758 | CPUMCTX_EXTRN_HWVIRT
9759 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9760 AssertRCReturn(rc, rc);
9761
9762 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9763
9764 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9765 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9766 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9767
9768 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9769 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9770 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9771 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9772 {
9773 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9774 rcStrict = VINF_SUCCESS;
9775 }
9776 return rcStrict;
9777}
9778
9779
9780/**
9781 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9782 */
9783HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9784{
9785 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9786
9787 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9788 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
9789 | CPUMCTX_EXTRN_HWVIRT
9790 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9791 AssertRCReturn(rc, rc);
9792
9793 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9794
9795 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9796 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9797 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9798 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9799 {
9800 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9801 rcStrict = VINF_SUCCESS;
9802 }
9803 return rcStrict;
9804}
9805
9806
9807/**
9808 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9809 */
9810HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9811{
9812 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9813
9814 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9815 | HMVMX_READ_EXIT_INSTR_INFO
9816 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9817 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9818 | CPUMCTX_EXTRN_SREG_MASK
9819 | CPUMCTX_EXTRN_HWVIRT
9820 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9821 AssertRCReturn(rc, rc);
9822
9823 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9824
9825 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9826 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9827
9828 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9829 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9830 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9831 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9832 {
9833 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9834 rcStrict = VINF_SUCCESS;
9835 }
9836 return rcStrict;
9837}
9838
9839
9840/**
9841 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9842 */
9843HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9844{
9845 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9846
9847 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9848 | HMVMX_READ_EXIT_INSTR_INFO
9849 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9850 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9851 | CPUMCTX_EXTRN_SREG_MASK
9852 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9853 AssertRCReturn(rc, rc);
9854
9855 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9856
9857 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9858 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9859
9860 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9861 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9862 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9863 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9864 {
9865 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9866 rcStrict = VINF_SUCCESS;
9867 }
9868 return rcStrict;
9869}
9870
9871
9872# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9873/**
9874 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9875 */
9876HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9877{
9878 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9879
9880 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9881 | HMVMX_READ_EXIT_INSTR_INFO
9882 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9883 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9884 | CPUMCTX_EXTRN_SREG_MASK
9885 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9886 AssertRCReturn(rc, rc);
9887
9888 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9889
9890 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9891 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9892
9893 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9894 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9895 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9896 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9897 {
9898 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9899 rcStrict = VINF_SUCCESS;
9900 }
9901 return rcStrict;
9902}
9903# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9904#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9905/** @} */
9906
9907
9908#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9909/** @name Nested-guest VM-exit handlers.
9910 * @{
9911 */
9912/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9913/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9914/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9915
9916/**
9917 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9918 * Conditional VM-exit.
9919 */
9920HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9921{
9922 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9923
9924 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
9925
9926 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9927 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9928 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9929
9930 switch (uExitIntType)
9931 {
9932# ifndef IN_NEM_DARWIN
9933 /*
9934 * Physical NMIs:
9935         * We shouldn't direct host physical NMIs to the nested-guest. Dispatch them to the host.
9936 */
9937 case VMX_EXIT_INT_INFO_TYPE_NMI:
9938 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9939# endif
9940
9941 /*
9942 * Hardware exceptions,
9943 * Software exceptions,
9944 * Privileged software exceptions:
9945 * Figure out if the exception must be delivered to the guest or the nested-guest.
9946 */
9947 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9948 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9949 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9950 {
9951 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9952 | HMVMX_READ_EXIT_INSTR_LEN
9953 | HMVMX_READ_IDT_VECTORING_INFO
9954 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9955
9956 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
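            /* Reflect the exception to the nested hypervisor only if its exception bitmap
               (and, for #PF, the error-code match controls) indicates it wants this vector. */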
9957 if (CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo), pVmxTransient->uExitIntErrorCode))
9958 {
9959 /* Exit qualification is required for debug and page-fault exceptions. */
9960 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9961
9962 /*
9963 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9964 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9965 * length. However, if delivery of a software interrupt, software exception or privileged
9966 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
9967 */
9968 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9969 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT(pVmxTransient->uExitIntInfo,
9970 pVmxTransient->uExitIntErrorCode,
9971 pVmxTransient->uIdtVectoringInfo,
9972 pVmxTransient->uIdtVectoringErrorCode);
9973#ifdef DEBUG_ramshankar
9974 vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9975 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n",
9976 pVmxTransient->uExitIntInfo, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
9977 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9978 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n",
9979 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
9980#endif
9981 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
9982 }
9983
9984 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
9985 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9986 return vmxHCExitXcpt(pVCpu, pVmxTransient);
9987 }
9988
9989 /*
9990 * Software interrupts:
9991 * VM-exits cannot be caused by software interrupts.
9992 *
9993 * External interrupts:
9994 * This should only happen when "acknowledge external interrupts on VM-exit"
9995 * control is set. However, we never set this when executing a guest or
9996 * nested-guest. For nested-guests it is emulated while injecting interrupts into
9997 * the guest.
9998 */
9999 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10000 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
10001 default:
10002 {
10003 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
10004 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
10005 }
10006 }
10007}
10008
10009
10010/**
10011 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
10012 * Unconditional VM-exit.
10013 */
10014HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10015{
10016 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10017 return IEMExecVmxVmexitTripleFault(pVCpu);
10018}
10019
10020
10021/**
10022 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
10023 */
10024HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10025{
10026 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10027
10028 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
10029 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10030 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
10031}
10032
10033
10034/**
10035 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
10036 */
10037HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10038{
10039 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10040
10041 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
10042 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10043    return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
10044}
10045
10046
10047/**
10048 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
10049 * Unconditional VM-exit.
10050 */
10051HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10052{
10053 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10054
10055 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10056 | HMVMX_READ_EXIT_INSTR_LEN
10057 | HMVMX_READ_IDT_VECTORING_INFO
10058 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10059
10060 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10061 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10062 pVmxTransient->uIdtVectoringErrorCode);
10063 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
10064}
10065
10066
10067/**
10068 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10069 */
10070HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10071{
10072 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10073
10074 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
10075 {
10076 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10077 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10078 }
10079 return vmxHCExitHlt(pVCpu, pVmxTransient);
10080}
10081
10082
10083/**
10084 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10085 */
10086HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10087{
10088 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10089
10090 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10091 {
10092 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10093 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10094 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10095 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10096 }
10097 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
10098}
10099
10100
10101/**
10102 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10103 */
10104HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10105{
10106 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10107
10108 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
10109 {
10110 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10111 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10112 }
10113 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
10114}
10115
10116
10117/**
10118 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
10119 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
10120 */
10121HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10122{
10123 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10124
10125 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
10126 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
10127
10128 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10129
10130 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
10131 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10132 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10133
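    /* When the guest is not in long mode, only the lower 32 bits of the register are
       used as the VMCS-field encoding, so truncate it before the intercept check. */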
10134 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
10135 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
10136 u64VmcsField &= UINT64_C(0xffffffff);
10137
10138 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
10139 {
10140 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10141 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10142 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10143 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10144 }
10145
10146 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
10147 return vmxHCExitVmread(pVCpu, pVmxTransient);
10148 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
10149}
10150
10151
10152/**
10153 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10154 */
10155HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10156{
10157 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10158
10159 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10160 {
10161 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10162 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10163 }
10164
10165 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
10166}
10167
10168
10169/**
10170 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
10171 * Conditional VM-exit.
10172 */
10173HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10174{
10175 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10176
10177 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10178 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10179
10180 VBOXSTRICTRC rcStrict;
10181 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
10182 switch (uAccessType)
10183 {
10184 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
10185 {
10186 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10187 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10188 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10189 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10190
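            /* Determine whether the nested hypervisor intercepts this MOV-to-CRx write:
               CR0/CR4 depend on the guest/host masks and read shadows, CR3 on the
               CR3-load exiting control and CR3-target list, CR8 on CR8-load exiting. */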
10191 bool fIntercept;
10192 switch (iCrReg)
10193 {
10194 case 0:
10195 case 4:
10196 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
10197 break;
10198
10199 case 3:
10200 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
10201 break;
10202
10203 case 8:
10204 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
10205 break;
10206
10207 default:
10208 fIntercept = false;
10209 break;
10210 }
10211 if (fIntercept)
10212 {
10213 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10214 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10215 }
10216 else
10217 {
10218 int const rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10219 AssertRCReturn(rc, rc);
10220 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10221 }
10222 break;
10223 }
10224
10225 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
10226 {
10227 /*
10228 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
10229 * CR2 reads do not cause a VM-exit.
10230 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
10231 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
10232 */
10233 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10234 if ( iCrReg == 3
10235 || iCrReg == 8)
10236 {
10237 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
10238 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
10239 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
10240 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
10241 {
10242 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10243 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10244 }
10245 else
10246 {
10247 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10248 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10249 }
10250 }
10251 else
10252 {
10253 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
10254 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
10255 }
10256 break;
10257 }
10258
10259 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
10260 {
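            /* CLTS is intercepted only when the nested hypervisor owns CR0.TS (the bit is
               set in the CR0 guest/host mask) and the CR0 read shadow has TS set. */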
10261 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
10262 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
10263 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
10264 if ( (uGstHostMask & X86_CR0_TS)
10265 && (uReadShadow & X86_CR0_TS))
10266 {
10267 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10268 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10269 }
10270 else
10271 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
10272 break;
10273 }
10274
10275 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10276 {
10277 RTGCPTR GCPtrEffDst;
10278 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
10279 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
10280 if (fMemOperand)
10281 {
10282 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10283 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
10284 }
10285 else
10286 GCPtrEffDst = NIL_RTGCPTR;
10287
10288 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
10289 {
10290 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10291 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
10292 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10293 }
10294 else
10295 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
10296 break;
10297 }
10298
10299 default:
10300 {
10301 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
10302 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
10303 }
10304 }
10305
10306 if (rcStrict == VINF_IEM_RAISED_XCPT)
10307 {
10308 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10309 rcStrict = VINF_SUCCESS;
10310 }
10311 return rcStrict;
10312}
10313
10314
10315/**
10316 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
10317 * Conditional VM-exit.
10318 */
10319HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10320{
10321 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10322
10323 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
10324 {
10325 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10326 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10327 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10328 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10329 }
10330 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
10331}
10332
10333
10334/**
10335 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
10336 * Conditional VM-exit.
10337 */
10338HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10339{
10340 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10341
10342 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10343
10344 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
10345 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
10346 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
10347
10348 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
10349 uint8_t const cbAccess = s_aIOSizes[uIOSize];
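    /* Check whether the nested hypervisor intercepts this port access, either via the
       unconditional I/O exiting control or the I/O bitmaps. */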
10350 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
10351 {
10352 /*
10353 * IN/OUT instruction:
10354 * - Provides VM-exit instruction length.
10355 *
10356 * INS/OUTS instruction:
10357 * - Provides VM-exit instruction length.
10358 * - Provides Guest-linear address.
10359 * - Optionally provides VM-exit instruction info (depends on CPU feature).
10360 */
10361 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10362 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10363
10364        /* Make sure we don't use stale/uninitialized VMX-transient info below. */
10365 pVmxTransient->ExitInstrInfo.u = 0;
10366 pVmxTransient->uGuestLinearAddr = 0;
10367
10368 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
10369 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
10370 if (fIOString)
10371 {
10372 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10373 if (fVmxInsOutsInfo)
10374 {
10375 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
10376 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10377 }
10378 }
10379
10380 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
10381 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10382 }
10383 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
10384}
10385
10386
10387/**
10388 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10389 */
10390HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10391{
10392 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10393
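    /* Consult the nested-guest MSR read bitmap for the MSR in ECX when MSR bitmaps are
       in use; without them every RDMSR unconditionally causes a VM-exit. */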
10394 uint32_t fMsrpm;
10395 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10396 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10397 else
10398 fMsrpm = VMXMSRPM_EXIT_RD;
10399
10400 if (fMsrpm & VMXMSRPM_EXIT_RD)
10401 {
10402 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10403 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10404 }
10405 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
10406}
10407
10408
10409/**
10410 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10411 */
10412HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10413{
10414 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10415
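    /* Same idea as for RDMSR above, but consulting the write permission bits. */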
10416 uint32_t fMsrpm;
10417 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10418 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10419 else
10420 fMsrpm = VMXMSRPM_EXIT_WR;
10421
10422 if (fMsrpm & VMXMSRPM_EXIT_WR)
10423 {
10424 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10425 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10426 }
10427 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
10428}
10429
10430
10431/**
10432 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10433 */
10434HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10435{
10436 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10437
10438 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
10439 {
10440 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10441 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10442 }
10443 return vmxHCExitMwait(pVCpu, pVmxTransient);
10444}
10445
10446
10447/**
10448 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
10449 * VM-exit.
10450 */
10451HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10452{
10453 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10454
10455 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
10456 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10457 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10458 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10459}
10460
10461
10462/**
10463 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10464 */
10465HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10466{
10467 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10468
10469 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
10470 {
10471 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10472 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10473 }
10474 return vmxHCExitMonitor(pVCpu, pVmxTransient);
10475}
10476
10477
10478/**
10479 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10480 */
10481HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10482{
10483 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10484
10485 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10486 * PAUSE when executing a nested-guest? If it does not, we would not need
10487 * to check for the intercepts here. Just call VM-exit... */
10488
10489 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10490 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10491 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10492 {
10493 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10494 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10495 }
10496 return vmxHCExitPause(pVCpu, pVmxTransient);
10497}
10498
10499
10500/**
10501 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10502 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10503 */
10504HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10505{
10506 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10507
10508 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10509 {
10510 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10511 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10512 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10513 }
10514 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10515}
10516
10517
10518/**
10519 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10520 * VM-exit.
10521 */
10522HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10523{
10524 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10525
10526 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10527 | HMVMX_READ_EXIT_INSTR_LEN
10528 | HMVMX_READ_IDT_VECTORING_INFO
10529 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10530
10531 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10532
10533 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10534 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10535
10536 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10537 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10538 pVmxTransient->uIdtVectoringErrorCode);
10539 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10540}
10541
10542
10543/**
10544 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10545 * Conditional VM-exit.
10546 */
10547HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10548{
10549 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10550
10551 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10552 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10553 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10554}
10555
10556
10557/**
10558 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10559 * Conditional VM-exit.
10560 */
10561HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10562{
10563 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10564
10565 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10566 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10567 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10568}
10569
10570
10571/**
10572 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10573 */
10574HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10575{
10576 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10577
10578 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10579 {
10580 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10581 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10582 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10583 }
10584 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10585}
10586
10587
10588/**
10589 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10590 */
10591HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10592{
10593 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10594
10595 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10596 {
10597 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10598 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10599 }
10600 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10601}
10602
10603
10604/**
10605 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10606 */
10607HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10608{
10609 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10610
10611 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10612 {
10613 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10614 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10615 | HMVMX_READ_EXIT_INSTR_INFO
10616 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10617 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10618 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10619 }
10620 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10621}
10622
10623
10624/**
10625 * Nested-guest VM-exit handler for invalid-guest state
10626 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10627 */
10628HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10629{
10630 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10631
10632 /*
10633 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10634     * So if it does happen, it most likely indicates a bug in the hardware-assisted VMX code.
10635     * Handle it as if the outer guest were in an invalid guest state.
10636 *
10637 * When the fast path is implemented, this should be changed to cause the corresponding
10638 * nested-guest VM-exit.
10639 */
10640 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10641}
10642
10643
10644/**
10645 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10646 * and only provide the instruction length.
10647 *
10648 * Unconditional VM-exit.
10649 */
10650HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10651{
10652 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10653
10654#ifdef VBOX_STRICT
10655 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10656 switch (pVmxTransient->uExitReason)
10657 {
10658 case VMX_EXIT_ENCLS:
10659 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10660 break;
10661
10662 case VMX_EXIT_VMFUNC:
10663 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10664 break;
10665 }
10666#endif
10667
10668 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10669 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10670}
10671
10672
10673/**
10674 * Nested-guest VM-exit handler for instructions that provide instruction length as
10675 * well as more information.
10676 *
10677 * Unconditional VM-exit.
10678 */
10679HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10680{
10681 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10682
10683# ifdef VBOX_STRICT
10684 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10685 switch (pVmxTransient->uExitReason)
10686 {
10687 case VMX_EXIT_GDTR_IDTR_ACCESS:
10688 case VMX_EXIT_LDTR_TR_ACCESS:
10689 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10690 break;
10691
10692 case VMX_EXIT_RDRAND:
10693 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10694 break;
10695
10696 case VMX_EXIT_RDSEED:
10697 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10698 break;
10699
10700 case VMX_EXIT_XSAVES:
10701 case VMX_EXIT_XRSTORS:
10702 /** @todo NSTVMX: Verify XSS-bitmap. */
10703 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10704 break;
10705
10706 case VMX_EXIT_UMWAIT:
10707 case VMX_EXIT_TPAUSE:
10708 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10709 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10710 break;
10711
10712 case VMX_EXIT_LOADIWKEY:
10713 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10714 break;
10715 }
10716# endif
10717
10718 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10719 | HMVMX_READ_EXIT_INSTR_LEN
10720 | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10721 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10722 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10723}
10724
10725# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10726
10727/**
10728 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10729 * Conditional VM-exit.
10730 */
10731HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10732{
10733 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10734 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10735
10736 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10737 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10738 {
10739 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10740 | HMVMX_READ_EXIT_INSTR_LEN
10741 | HMVMX_READ_EXIT_INTERRUPTION_INFO
10742 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10743 | HMVMX_READ_IDT_VECTORING_INFO
10744 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
10745 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10746 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10747 AssertRCReturn(rc, rc);
10748
10749 /*
10750         * If this VM-exit is ours to handle, we are responsible for re-injecting any event
10751         * whose delivery might have triggered it. If we forward it to the nested hypervisor
10752         * (the inner VMM), the event becomes its problem and we clear the recovered event.
10753 */
10754 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10755 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10756 { /*likely*/ }
10757 else
10758 {
10759 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
10760 return rcStrict;
10761 }
10762 bool const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* paranoia. should not inject events below. */
10763
10764 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10765 uint64_t const uExitQual = pVmxTransient->uExitQual;
10766
10767 RTGCPTR GCPtrNestedFault;
10768 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10769 if (fIsLinearAddrValid)
10770 {
10771 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10772 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
10773 }
10774 else
10775 GCPtrNestedFault = 0;
10776
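        /* Translate the EPT-violation exit qualification into a #PF-style error code for
           the nested page-table walk: instruction fetch -> ID, write access -> RW, and any
           of the EPT read/write/execute permission bits -> P (the EPT entry was present). */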
10777 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10778 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10779 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10780 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10781 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10782
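        /* Let PGM try to resolve the fault against the nested (EPT) paging structures first;
           only if it cannot do we reflect an EPT violation or misconfig to the nested hypervisor. */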
10783 PGMPTWALK Walk;
10784 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10785 rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, CPUMCTX2CORE(pCtx),
10786 GCPhysNestedFault, fIsLinearAddrValid, GCPtrNestedFault,
10787 &Walk);
10788 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
10789 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
10790 if (RT_SUCCESS(rcStrict))
10791 return rcStrict;
10792
10793 if (fClearEventOnForward)
10794 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
10795
10796 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10797 pVmxTransient->uIdtVectoringErrorCode);
10798 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10799 {
10800 VMXVEXITINFO const ExitInfo
10801 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
10802 pVmxTransient->uExitQual,
10803 pVmxTransient->cbExitInstr,
10804 pVmxTransient->uGuestLinearAddr,
10805 pVmxTransient->uGuestPhysicalAddr);
10806 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10807 }
10808
10809 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10810 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10811 }
10812
10813 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10814}
10815
10816
10817/**
10818 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10819 * Conditional VM-exit.
10820 */
10821HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10822{
10823 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10824 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10825
10826 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10827 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10828 {
10829 vmxHCReadToTransient<HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10830 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10831 AssertRCReturn(rc, rc);
10832
10833 PGMPTWALK Walk;
10834 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10835 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10836 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, CPUMCTX2CORE(pCtx),
10837 GCPhysNestedFault, false /* fIsLinearAddrValid */,
10838 0 /* GCPtrNestedFault */, &Walk);
10839 if (RT_SUCCESS(rcStrict))
10840 {
10841 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
10842 return rcStrict;
10843 }
10844
10845 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
10846 vmxHCReadToTransient< HMVMX_READ_IDT_VECTORING_INFO
10847 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10848
10849 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10850 pVmxTransient->uIdtVectoringErrorCode);
10851 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10852 }
10853
10854 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
10855}
10856
10857# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10858
10859/** @} */
10860#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10861
10862
10863/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10864 * probes.
10865 *
10866 * The following few functions and associated structure contains the bloat
10867 * necessary for providing detailed debug events and dtrace probes as well as
10868 * reliable host side single stepping. This works on the principle of
10869 * "subclassing" the normal execution loop and workers. We replace the loop
10870 * method completely and override selected helpers to add necessary adjustments
10871 * to their core operation.
10872 *
10873 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10874 * any performance for debug and analysis features.
10875 *
10876 * @{
10877 */
10878
10879/**
10880 * Transient per-VCPU debug state of the VMCS and related info that we save/restore
10881 * in the debug run loop.
10882 */
10883typedef struct VMXRUNDBGSTATE
10884{
10885 /** The RIP we started executing at. This is for detecting that we stepped. */
10886 uint64_t uRipStart;
10887 /** The CS we started executing with. */
10888 uint16_t uCsStart;
10889
10890 /** Whether we've actually modified the 1st execution control field. */
10891 bool fModifiedProcCtls : 1;
10892 /** Whether we've actually modified the 2nd execution control field. */
10893 bool fModifiedProcCtls2 : 1;
10894 /** Whether we've actually modified the exception bitmap. */
10895 bool fModifiedXcptBitmap : 1;
10896
10897    /** We desire the CR0 mask to be cleared. */
10898 bool fClearCr0Mask : 1;
10899    /** We desire the CR4 mask to be cleared. */
10900 bool fClearCr4Mask : 1;
10901 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10902 uint32_t fCpe1Extra;
10903 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10904 uint32_t fCpe1Unwanted;
10905 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10906 uint32_t fCpe2Extra;
10907 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10908 uint32_t bmXcptExtra;
10909 /** The sequence number of the Dtrace provider settings the state was
10910 * configured against. */
10911 uint32_t uDtraceSettingsSeqNo;
10912 /** VM-exits to check (one bit per VM-exit). */
10913 uint32_t bmExitsToCheck[3];
10914
10915 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10916 uint32_t fProcCtlsInitial;
10917 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10918 uint32_t fProcCtls2Initial;
10919 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10920 uint32_t bmXcptInitial;
10921} VMXRUNDBGSTATE;
10922AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
10923typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
10924
10925
10926/**
10927 * Initializes the VMXRUNDBGSTATE structure.
10928 *
10929 * @param pVCpu The cross context virtual CPU structure of the
10930 * calling EMT.
10931 * @param pVmxTransient The VMX-transient structure.
10932 * @param pDbgState The debug state to initialize.
10933 */
10934static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10935{
10936 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
10937 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
10938
10939 pDbgState->fModifiedProcCtls = false;
10940 pDbgState->fModifiedProcCtls2 = false;
10941 pDbgState->fModifiedXcptBitmap = false;
10942 pDbgState->fClearCr0Mask = false;
10943 pDbgState->fClearCr4Mask = false;
10944 pDbgState->fCpe1Extra = 0;
10945 pDbgState->fCpe1Unwanted = 0;
10946 pDbgState->fCpe2Extra = 0;
10947 pDbgState->bmXcptExtra = 0;
10948 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
10949 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
10950 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
10951}
10952
10953
10954/**
10955 * Updates the VMCS fields with changes requested by @a pDbgState.
10956 *
10957 * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
10958 * immediately before executing guest code, i.e. when interrupts are disabled.
10959 * We don't check status codes here as we cannot easily assert or return in the
10960 * latter case.
10961 *
10962 * @param pVCpu The cross context virtual CPU structure.
10963 * @param pVmxTransient The VMX-transient structure.
10964 * @param pDbgState The debug state.
10965 */
10966static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10967{
10968 /*
10969 * Ensure desired flags in VMCS control fields are set.
10970 * (Ignoring write failure here, as we're committed and it's just debug extras.)
10971 *
10972 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
10973 * there should be no stale data in pCtx at this point.
10974 */
10975 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10976 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
10977 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
10978 {
10979 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
10980 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
10981 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
10982 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
10983 pDbgState->fModifiedProcCtls = true;
10984 }
10985
10986 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
10987 {
10988 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
10989 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
10990 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
10991 pDbgState->fModifiedProcCtls2 = true;
10992 }
10993
10994 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
10995 {
10996 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
10997 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
10998 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
10999 pDbgState->fModifiedXcptBitmap = true;
11000 }
11001
11002 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
11003 {
11004 pVmcsInfo->u64Cr0Mask = 0;
11005 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
11006 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
11007 }
11008
11009 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
11010 {
11011 pVmcsInfo->u64Cr4Mask = 0;
11012 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
11013 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
11014 }
11015
11016 NOREF(pVCpu);
11017}
11018
11019
11020/**
11021 * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
11022 * re-entry next time around.
11023 *
11024 * @returns Strict VBox status code (i.e. informational status codes too).
11025 * @param pVCpu The cross context virtual CPU structure.
11026 * @param pVmxTransient The VMX-transient structure.
11027 * @param pDbgState The debug state.
11028 * @param rcStrict The return code from executing the guest using single
11029 * stepping.
11030 */
11031static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
11032 VBOXSTRICTRC rcStrict)
11033{
11034 /*
11035 * Restore VM-exit control settings as we may not reenter this function the
11036 * next time around.
11037 */
11038 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11039
11040    /* We reload the initial value and trigger what recalculations we can the
11041       next time around. From the looks of things, that's all that's required atm. */
11042 if (pDbgState->fModifiedProcCtls)
11043 {
11044 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
11045 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
11046 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
11047 AssertRC(rc2);
11048 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
11049 }
11050
11051 /* We're currently the only ones messing with this one, so just restore the
11052 cached value and reload the field. */
11053 if ( pDbgState->fModifiedProcCtls2
11054 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
11055 {
11056 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
11057 AssertRC(rc2);
11058 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
11059 }
11060
11061 /* If we've modified the exception bitmap, we restore it and trigger
11062 reloading and partial recalculation the next time around. */
11063 if (pDbgState->fModifiedXcptBitmap)
11064 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
11065
11066 return rcStrict;
11067}
11068
11069
11070/**
11071 * Configures VM-exit controls for current DBGF and DTrace settings.
11072 *
11073 * This updates @a pDbgState and the VMCS execution control fields to reflect
11074 * the necessary VM-exits demanded by DBGF and DTrace.
11075 *
11076 * @param pVCpu The cross context virtual CPU structure.
11077 * @param pVmxTransient The VMX-transient structure. May update
11078 * fUpdatedTscOffsettingAndPreemptTimer.
11079 * @param pDbgState The debug state.
11080 */
11081static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11082{
11083#ifndef IN_NEM_DARWIN
11084 /*
11085 * Take down the dtrace serial number so we can spot changes.
11086 */
11087 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
11088 ASMCompilerBarrier();
11089#endif
11090
11091 /*
11092 * We'll rebuild most of the middle block of data members (holding the
11093 * current settings) as we go along here, so start by clearing it all.
11094 */
11095 pDbgState->bmXcptExtra = 0;
11096 pDbgState->fCpe1Extra = 0;
11097 pDbgState->fCpe1Unwanted = 0;
11098 pDbgState->fCpe2Extra = 0;
11099 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
11100 pDbgState->bmExitsToCheck[i] = 0;
11101
11102 /*
11103 * Software interrupts (INT XXh) - no idea how to trigger these...
11104 */
11105 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11106 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
11107 || VBOXVMM_INT_SOFTWARE_ENABLED())
11108 {
11109 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11110 }
11111
11112 /*
11113 * INT3 breakpoints - triggered by #BP exceptions.
11114 */
11115 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
11116 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11117
11118 /*
11119 * Exception bitmap and XCPT events+probes.
11120 */
11121 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
11122 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
11123 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
11124
11125 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
11126 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
11127 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11128 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
11129 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
11130 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
11131 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
11132 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
11133 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
11134 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
11135 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
11136 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
11137 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
11138 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
11139 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
11140 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
11141 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
11142 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
11143
11144 if (pDbgState->bmXcptExtra)
11145 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11146
11147 /*
11148 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
11149 *
11150 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
11151 * So, when adding/changing/removing please don't forget to update it.
11152 *
11153 * Some of the macros are picking up local variables to save horizontal space
11154 * (being able to see it all in a table is the lesser evil here).
11155 */
11156#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
11157 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
11158 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
11159#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
11160 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11161 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11162 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11163 } else do { } while (0)
11164#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
11165 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11166 { \
11167 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
11168 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11169 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11170 } else do { } while (0)
11171#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
11172 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11173 { \
11174 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
11175 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11176 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11177 } else do { } while (0)
11178#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
11179 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11180 { \
11181 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
11182 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11183 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11184 } else do { } while (0)
11185
11186 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
11187 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
11188 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
11189 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
11190 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
11191
11192 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
11193 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
11194 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
11195 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
11196 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
11197 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
11198 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
11199 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
11200 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
11201 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
11202 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
11203 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
11204 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
11205 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
11206 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
11207 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
11208 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
11209 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
11210 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
11211 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
11212 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
11213 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
11214 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
11215 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
11216 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
11217 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
11218 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
11219 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
11220 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
11221 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
11222 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
11223 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
11224 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
11225 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
11226 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
11227 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
11228
11229 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
11230 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11231 {
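        /* Make sure the CR0, CR4 and APIC TPR values are up to date before we
           start fiddling with the CRx intercepts and the TPR shadow below. */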
11232 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo,
11233 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
11234 AssertRC(rc);
11235
11236#if 0 /** @todo fix me */
11237 pDbgState->fClearCr0Mask = true;
11238 pDbgState->fClearCr4Mask = true;
11239#endif
11240 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
11241 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
11242 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11243 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
11244 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
11245 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
11246 require clearing here and in the loop if we start using it. */
11247 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
11248 }
11249 else
11250 {
11251 if (pDbgState->fClearCr0Mask)
11252 {
11253 pDbgState->fClearCr0Mask = false;
11254 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
11255 }
11256 if (pDbgState->fClearCr4Mask)
11257 {
11258 pDbgState->fClearCr4Mask = false;
11259 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
11260 }
11261 }
11262 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
11263 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
11264
11265 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
11266 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
11267 {
11268 /** @todo later, need to fix handler as it assumes this won't usually happen. */
11269 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
11270 }
11271 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
11272 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
11273
11274 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
11275 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
11276 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
11277 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
11278 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
11279 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
11280 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
11281 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
11282#if 0 /** @todo too slow, fix handler. */
11283 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
11284#endif
11285 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
11286
11287 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
11288 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
11289 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
11290 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
11291 {
11292 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11293 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
11294 }
11295 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11296 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11297 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11298 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11299
11300 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
11301 || IS_EITHER_ENABLED(pVM, INSTR_STR)
11302 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
11303 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
11304 {
11305 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11306 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
11307 }
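    /* Note that VMX_PROC_CTLS2_DESC_TABLE_EXIT is a single control covering all
       eight descriptor-table instructions, so requesting it for either group
       above also produces exits for the other group. */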
11308 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
11309 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
11310 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
11311 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
11312
11313 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
11314 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
11315 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
11316 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
11317 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
11318 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
11319 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
11320 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
11321 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
11322 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
11323 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
11324 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
11325 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
11326 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
11327 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
11328 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
11329 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
11330 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
11331 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
11332    SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES,    VMX_EXIT_XSAVES);
11333 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
11334 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
11335
11336#undef IS_EITHER_ENABLED
11337#undef SET_ONLY_XBM_IF_EITHER_EN
11338#undef SET_CPE1_XBM_IF_EITHER_EN
11339#undef SET_CPEU_XBM_IF_EITHER_EN
11340#undef SET_CPE2_XBM_IF_EITHER_EN
11341
11342 /*
11343     * Sanitize the controls: drop anything the allowed-0/allowed-1 control settings won't let us set or clear.
11344 */
11345 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
11346 if (pDbgState->fCpe2Extra)
11347 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
11348 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
11349 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
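    /* If the desired RDTSC intercept state changed, the TSC offsetting and
       preemption timer setup must be redone before the next VM-entry. */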
11350#ifndef IN_NEM_DARWIN
11351 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11352 {
11353 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
11354 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11355 }
11356#else
11357 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11358 {
11359 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
11360 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11361 }
11362#endif
11363
11364 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
11365 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
11366 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
11367 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
11368}
11369
11370
11371/**
11372 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
11373 * appropriate.
11374 *
11375 * The caller has already checked the VM-exit against the
11376 * VMXRUNDBGSTATE::bmExitsToCheck bitmap and has also checked for NMIs,
11377 * so we don't need to do either of those here.
11378 *
11379 * @returns Strict VBox status code (i.e. informational status codes too).
11380 * @param pVCpu The cross context virtual CPU structure.
11381 * @param pVmxTransient The VMX-transient structure.
11382 * @param uExitReason The VM-exit reason.
11383 *
11384 * @remarks The name of this function is displayed by dtrace, so keep it short
11385 * and to the point. No longer than 33 chars, please.
11386 */
11387static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11388{
11389 /*
11390 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11391 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11392 *
11393 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
11394     * does. Must add/change/remove in both places. Same ordering, please.
11395 *
11396 * Added/removed events must also be reflected in the next section
11397 * where we dispatch dtrace events.
11398 */
11399 bool fDtrace1 = false;
11400 bool fDtrace2 = false;
11401 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11402 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11403 uint32_t uEventArg = 0;
11404#define SET_EXIT(a_EventSubName) \
11405 do { \
11406 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11407 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11408 } while (0)
11409#define SET_BOTH(a_EventSubName) \
11410 do { \
11411 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11412 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11413 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11414 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11415 } while (0)
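    /* Thus, for example, the SET_BOTH(CPUID) case below expands (modulo the
     * do/while wrapper) to
     *     enmEvent1 = DBGFEVENT_INSTR_CPUID;
     *     enmEvent2 = DBGFEVENT_EXIT_CPUID;
     *     fDtrace1  = VBOXVMM_INSTR_CPUID_ENABLED();
     *     fDtrace2  = VBOXVMM_EXIT_CPUID_ENABLED();
     * picking up both the instruction and the exit flavour of the event. */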
11416 switch (uExitReason)
11417 {
11418 case VMX_EXIT_MTF:
11419 return vmxHCExitMtf(pVCpu, pVmxTransient);
11420
11421 case VMX_EXIT_XCPT_OR_NMI:
11422 {
11423 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11424 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11425 {
11426 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11427 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11428 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11429 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11430 {
11431 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11432 {
11433 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE>(pVCpu, pVmxTransient);
11434 uEventArg = pVmxTransient->uExitIntErrorCode;
11435 }
11436 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11437 switch (enmEvent1)
11438 {
11439 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11440 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11441 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11442 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11443 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11444 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11445 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11446 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11447 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11448 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11449 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11450 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11451 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11452 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11453 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11454 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11455 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11456 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11457 default: break;
11458 }
11459 }
11460 else
11461 AssertFailed();
11462 break;
11463
11464 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11465 uEventArg = idxVector;
11466 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11467 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11468 break;
11469 }
11470 break;
11471 }
11472
11473 case VMX_EXIT_TRIPLE_FAULT:
11474 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11475 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11476 break;
11477 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11478 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11479 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11480 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11481 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11482
11483 /* Instruction specific VM-exits: */
11484 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11485 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11486 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11487 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11488 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11489 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11490 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11491 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11492 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11493 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11494 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11495 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11496 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11497 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11498 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11499 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11500 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11501 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11502 case VMX_EXIT_MOV_CRX:
11503 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11504 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11505 SET_BOTH(CRX_READ);
11506 else
11507 SET_BOTH(CRX_WRITE);
11508 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11509 break;
11510 case VMX_EXIT_MOV_DRX:
11511 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11512 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11513 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11514 SET_BOTH(DRX_READ);
11515 else
11516 SET_BOTH(DRX_WRITE);
11517 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11518 break;
11519 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11520 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11521 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11522 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11523 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11524 case VMX_EXIT_GDTR_IDTR_ACCESS:
11525 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11526 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11527 {
11528 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11529 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11530 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11531 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11532 }
11533 break;
11534
11535 case VMX_EXIT_LDTR_TR_ACCESS:
11536 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11537 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11538 {
11539 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11540 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11541 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11542 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11543 }
11544 break;
11545
11546 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11547 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11548 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11549 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11550 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11551 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11552 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11553 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11554 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11555 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11556 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11557
11558 /* Events that aren't relevant at this point. */
11559 case VMX_EXIT_EXT_INT:
11560 case VMX_EXIT_INT_WINDOW:
11561 case VMX_EXIT_NMI_WINDOW:
11562 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11563 case VMX_EXIT_PREEMPT_TIMER:
11564 case VMX_EXIT_IO_INSTR:
11565 break;
11566
11567 /* Errors and unexpected events. */
11568 case VMX_EXIT_INIT_SIGNAL:
11569 case VMX_EXIT_SIPI:
11570 case VMX_EXIT_IO_SMI:
11571 case VMX_EXIT_SMI:
11572 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11573 case VMX_EXIT_ERR_MSR_LOAD:
11574 case VMX_EXIT_ERR_MACHINE_CHECK:
11575 case VMX_EXIT_PML_FULL:
11576 case VMX_EXIT_VIRTUALIZED_EOI:
11577 break;
11578
11579 default:
11580 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11581 break;
11582 }
11583#undef SET_BOTH
11584#undef SET_EXIT
11585
11586 /*
11587     * Dtrace tracepoints go first. We do them all here at once so we don't
11588     * have to duplicate the guest-state saving and related setup a few dozen times.
11589     * The downside is that we've got to repeat the switch, though this time
11590 * we use enmEvent since the probes are a subset of what DBGF does.
11591 */
11592 if (fDtrace1 || fDtrace2)
11593 {
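        /* Pull in the exit qualification and the full state covered by
           HMVMX_CPUMCTX_EXTRN_ALL up front, so the probes below can rely on a
           complete pCtx. */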
11594 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11595 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11596 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11597 switch (enmEvent1)
11598 {
11599 /** @todo consider which extra parameters would be helpful for each probe. */
11600 case DBGFEVENT_END: break;
11601 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11602 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11603 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11604 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11605 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11606 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11607 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11608 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11609 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11610 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11611 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11612 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11613 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11614 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11615 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11616 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11617 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11618 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11619 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11620 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11621 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11622 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11623 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11624 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11625 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11626 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11627 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11628 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11629 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11630 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11631 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11632 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11633 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11634 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11635 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11636 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11637 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11638 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11639 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11640 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11641 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11642 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11643 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11644 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11645 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11646 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11647 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11648 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11649 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11650 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11651 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11652 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11653 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11654 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11655 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11656 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11657 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11658 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11659 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11660 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11661 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11662 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11663 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11664 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11665 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11666 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11667 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11668 }
11669 switch (enmEvent2)
11670 {
11671 /** @todo consider which extra parameters would be helpful for each probe. */
11672 case DBGFEVENT_END: break;
11673 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11674 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11675 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11676 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11677 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11678 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11679 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11680 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11681 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11682 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11683 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11684 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11685 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11686 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11687 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11688 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11689 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11690 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11691 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11692 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11693 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11694 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11695 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11696 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11697 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11698 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11699 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11700 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11701 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11702 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11703 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11704 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11705 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11706 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11707 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11708 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11709 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11710 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11711 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11712 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11713 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11714 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11715 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11716 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11717 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11718 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11719 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11720 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11721 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11722 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11723 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11724 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11725 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11726 }
11727 }
11728
11729 /*
11730     * Fire off the DBGF event, if enabled (our check here is just a quick one,
11731 * the DBGF call will do a full check).
11732 *
11733 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11734     * Note! If we have two events, we prioritize the first, i.e. the instruction
11735 * one, in order to avoid event nesting.
11736 */
11737 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11738 if ( enmEvent1 != DBGFEVENT_END
11739 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11740 {
11741 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11742 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11743 if (rcStrict != VINF_SUCCESS)
11744 return rcStrict;
11745 }
11746 else if ( enmEvent2 != DBGFEVENT_END
11747 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11748 {
11749 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11750 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11751 if (rcStrict != VINF_SUCCESS)
11752 return rcStrict;
11753 }
11754
11755 return VINF_SUCCESS;
11756}
11757
11758
11759/**
11760 * Single-stepping VM-exit filtering.
11761 *
11762 * This preprocesses the VM-exits, deciding whether we've gotten far
11763 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11764 * handling is performed.
11765 *
11766 * @returns Strict VBox status code (i.e. informational status codes too).
11767 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11768 * @param pVmxTransient The VMX-transient structure.
11769 * @param pDbgState The debug state.
11770 */
11771DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11772{
11773 /*
11774 * Expensive (saves context) generic dtrace VM-exit probe.
11775 */
11776 uint32_t const uExitReason = pVmxTransient->uExitReason;
11777 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11778 { /* more likely */ }
11779 else
11780 {
11781 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11782 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11783 AssertRC(rc);
11784 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11785 }
11786
11787#ifndef IN_NEM_DARWIN
11788 /*
11789 * Check for host NMI, just to get that out of the way.
11790 */
11791 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11792 { /* normally likely */ }
11793 else
11794 {
11795 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
11796 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11797 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11798 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11799 }
11800#endif
11801
11802 /*
11803 * Check for single stepping event if we're stepping.
11804 */
11805 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11806 {
11807 switch (uExitReason)
11808 {
11809 case VMX_EXIT_MTF:
11810 return vmxHCExitMtf(pVCpu, pVmxTransient);
11811
11812 /* Various events: */
11813 case VMX_EXIT_XCPT_OR_NMI:
11814 case VMX_EXIT_EXT_INT:
11815 case VMX_EXIT_TRIPLE_FAULT:
11816 case VMX_EXIT_INT_WINDOW:
11817 case VMX_EXIT_NMI_WINDOW:
11818 case VMX_EXIT_TASK_SWITCH:
11819 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11820 case VMX_EXIT_APIC_ACCESS:
11821 case VMX_EXIT_EPT_VIOLATION:
11822 case VMX_EXIT_EPT_MISCONFIG:
11823 case VMX_EXIT_PREEMPT_TIMER:
11824
11825 /* Instruction specific VM-exits: */
11826 case VMX_EXIT_CPUID:
11827 case VMX_EXIT_GETSEC:
11828 case VMX_EXIT_HLT:
11829 case VMX_EXIT_INVD:
11830 case VMX_EXIT_INVLPG:
11831 case VMX_EXIT_RDPMC:
11832 case VMX_EXIT_RDTSC:
11833 case VMX_EXIT_RSM:
11834 case VMX_EXIT_VMCALL:
11835 case VMX_EXIT_VMCLEAR:
11836 case VMX_EXIT_VMLAUNCH:
11837 case VMX_EXIT_VMPTRLD:
11838 case VMX_EXIT_VMPTRST:
11839 case VMX_EXIT_VMREAD:
11840 case VMX_EXIT_VMRESUME:
11841 case VMX_EXIT_VMWRITE:
11842 case VMX_EXIT_VMXOFF:
11843 case VMX_EXIT_VMXON:
11844 case VMX_EXIT_MOV_CRX:
11845 case VMX_EXIT_MOV_DRX:
11846 case VMX_EXIT_IO_INSTR:
11847 case VMX_EXIT_RDMSR:
11848 case VMX_EXIT_WRMSR:
11849 case VMX_EXIT_MWAIT:
11850 case VMX_EXIT_MONITOR:
11851 case VMX_EXIT_PAUSE:
11852 case VMX_EXIT_GDTR_IDTR_ACCESS:
11853 case VMX_EXIT_LDTR_TR_ACCESS:
11854 case VMX_EXIT_INVEPT:
11855 case VMX_EXIT_RDTSCP:
11856 case VMX_EXIT_INVVPID:
11857 case VMX_EXIT_WBINVD:
11858 case VMX_EXIT_XSETBV:
11859 case VMX_EXIT_RDRAND:
11860 case VMX_EXIT_INVPCID:
11861 case VMX_EXIT_VMFUNC:
11862 case VMX_EXIT_RDSEED:
11863 case VMX_EXIT_XSAVES:
11864 case VMX_EXIT_XRSTORS:
11865 {
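                /* Only report a completed step once the guest has moved past the
                   starting CS:RIP; an exit taken at the original instruction (e.g.
                   an exception raised before it completes) falls through to normal
                   handling below instead. */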
11866 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11867 AssertRCReturn(rc, rc);
11868 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11869 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11870 return VINF_EM_DBG_STEPPED;
11871 break;
11872 }
11873
11874 /* Errors and unexpected events: */
11875 case VMX_EXIT_INIT_SIGNAL:
11876 case VMX_EXIT_SIPI:
11877 case VMX_EXIT_IO_SMI:
11878 case VMX_EXIT_SMI:
11879 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11880 case VMX_EXIT_ERR_MSR_LOAD:
11881 case VMX_EXIT_ERR_MACHINE_CHECK:
11882 case VMX_EXIT_PML_FULL:
11883 case VMX_EXIT_VIRTUALIZED_EOI:
11884 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault like, so I guess we must process it? */
11885 break;
11886
11887 default:
11888 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11889 break;
11890 }
11891 }
11892
11893 /*
11894 * Check for debugger event breakpoints and dtrace probes.
11895 */
11896 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11897 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11898 {
11899 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11900 if (rcStrict != VINF_SUCCESS)
11901 return rcStrict;
11902 }
11903
11904 /*
11905 * Normal processing.
11906 */
11907#ifdef HMVMX_USE_FUNCTION_TABLE
11908 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11909#else
11910 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11911#endif
11912}
11913
11914/** @} */