
source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@94991

Last change on this file since 94991 was 94944, checked in by vboxsync, 3 years ago

VMM/HM: Use g_CpumHostFeatures instead of the copy in the VM structure. bugref:10093

1/* $Id: VMXAllTemplate.cpp.h 94944 2022-05-09 09:45:33Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Defined Constants And Macros *
21*********************************************************************************************************************************/
22#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
23# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
24#endif
25
26
27#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
28# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
29#endif
30
31
32/** Use the function table. */
33#define HMVMX_USE_FUNCTION_TABLE
34
35/** Determine which tagged-TLB flush handler to use. */
36#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
37#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
38#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
39#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
40
41/** Assert that all the given fields have been read from the VMCS. */
42#ifdef VBOX_STRICT
43# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
44 do { \
45 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
46 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
47 } while (0)
48#else
49# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
50#endif
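/*
 * Illustrative usage (hypothetical caller, not taken from this file): a VM-exit
 * handler that needs the Exit Qualification first reads it with
 * vmxHCReadExitQualVmcs() and can then assert that fact in strict builds:
 *
 *     vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
 *     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
 */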
51
52/**
53 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
54 * guest using hardware-assisted VMX.
55 *
56 * This excludes state like GPRs (other than RSP) which are always
57 * swapped and restored across the world-switch, and also registers like the
58 * EFER MSR which cannot be modified by the guest without causing a VM-exit.
59 */
60#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
61 | CPUMCTX_EXTRN_RFLAGS \
62 | CPUMCTX_EXTRN_RSP \
63 | CPUMCTX_EXTRN_SREG_MASK \
64 | CPUMCTX_EXTRN_TABLE_MASK \
65 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
66 | CPUMCTX_EXTRN_SYSCALL_MSRS \
67 | CPUMCTX_EXTRN_SYSENTER_MSRS \
68 | CPUMCTX_EXTRN_TSC_AUX \
69 | CPUMCTX_EXTRN_OTHER_MSRS \
70 | CPUMCTX_EXTRN_CR0 \
71 | CPUMCTX_EXTRN_CR3 \
72 | CPUMCTX_EXTRN_CR4 \
73 | CPUMCTX_EXTRN_DR7 \
74 | CPUMCTX_EXTRN_HWVIRT \
75 | CPUMCTX_EXTRN_INHIBIT_INT \
76 | CPUMCTX_EXTRN_INHIBIT_NMI)
77
78/**
79 * Exception bitmap mask for real-mode guests (real-on-v86).
80 *
81 * We need to intercept all exceptions manually, except:
82 * - \#AC and \#DB, which are always intercepted anyway to prevent the CPU from
83 * deadlocking due to bugs in Intel CPUs.
84 * - \#PF, which need not be intercepted even in real-mode if we have nested
85 * paging support.
86 */
87#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
88 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
89 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
90 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
91 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
92 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
93 | RT_BIT(X86_XCPT_XF))
94
95/** Maximum VM-instruction error number. */
96#define HMVMX_INSTR_ERROR_MAX 28
97
98/** Profiling macros. */
99#ifdef HM_PROFILE_EXIT_DISPATCH
100# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
101# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
102#else
103# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
104# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
105#endif
106
107#ifndef IN_NEM_DARWIN
108/** Assert that preemption is disabled or covered by thread-context hooks. */
109# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
110 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
111
112/** Assert that we haven't migrated CPUs when thread-context hooks are not
113 * used. */
114# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
115 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
116 ("Illegal migration! Entered on CPU %u Current %u\n", \
117 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
118#else
119# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
120# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
121#endif
122
123/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
124 * context. */
125#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
126 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
127 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
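/*
 * Illustrative usage (hypothetical): a bit set in fExtrn means the corresponding
 * state still lives in the VMCS, so asserting that e.g. CPUMCTX_EXTRN_CR4 is zero
 * documents that CR4 has already been imported into the guest-CPU context:
 *
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
 */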
128
129/** Log the VM-exit reason with an easily visible marker to identify it in a
130 * potential sea of logging data. */
131#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
132 do { \
133 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
134 HMGetVmxExitName(a_uExitReason))); \
135 } while (0)
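/*
 * Illustrative usage (hypothetical; assumes the exit reason has been stored in the
 * transient structure as pVmxTransient->uExitReason):
 *
 *     HMVMX_LOG_EXIT(pVCpu, pVmxTransient->uExitReason);
 */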
136
137
138/*********************************************************************************************************************************
139* Structures and Typedefs *
140*********************************************************************************************************************************/
141/**
142 * Memory operand read or write access.
143 */
144typedef enum VMXMEMACCESS
145{
146 VMXMEMACCESS_READ = 0,
147 VMXMEMACCESS_WRITE = 1
148} VMXMEMACCESS;
149
150
151/**
152 * VMX VM-exit handler.
153 *
154 * @returns Strict VBox status code (i.e. informational status codes too).
155 * @param pVCpu The cross context virtual CPU structure.
156 * @param pVmxTransient The VMX-transient structure.
157 */
158#ifndef HMVMX_USE_FUNCTION_TABLE
159typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
160#else
161typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
162/** Pointer to VM-exit handler. */
163typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
164#endif
165
166/**
167 * VMX VM-exit handler, non-strict status code.
168 *
169 * This is generally the same as FNVMXEXITHANDLER; the NSRC bit is just FYI.
170 *
171 * @returns VBox status code, no informational status code returned.
172 * @param pVCpu The cross context virtual CPU structure.
173 * @param pVmxTransient The VMX-transient structure.
174 *
175 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
176 * use of that status code will be replaced with VINF_EM_SOMETHING
177 * later when switching over to IEM.
178 */
179#ifndef HMVMX_USE_FUNCTION_TABLE
180typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
181#else
182typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
183#endif
184
185
186/*********************************************************************************************************************************
187* Internal Functions *
188*********************************************************************************************************************************/
189#ifndef HMVMX_USE_FUNCTION_TABLE
190DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
191# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
192# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
193#else
194# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
195# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
196#endif
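/*
 * Illustrative sketch (hypothetical, not a definition from this file): VM-exit
 * handlers are defined with HMVMX_EXIT_DECL so their linkage matches whichever
 * dispatch mechanism (function table or inlined switch) is being compiled:
 *
 *     HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
 *     {
 *         ... handle the exit, advance the guest RIP, return a strict status ...
 *     }
 */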
197#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
198DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
199#endif
200
201static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
202
203/** @name VM-exit handler prototypes.
204 * @{
205 */
206static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
207static FNVMXEXITHANDLER vmxHCExitExtInt;
208static FNVMXEXITHANDLER vmxHCExitTripleFault;
209static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
210static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
211static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
212static FNVMXEXITHANDLER vmxHCExitCpuid;
213static FNVMXEXITHANDLER vmxHCExitGetsec;
214static FNVMXEXITHANDLER vmxHCExitHlt;
215static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
216static FNVMXEXITHANDLER vmxHCExitInvlpg;
217static FNVMXEXITHANDLER vmxHCExitRdpmc;
218static FNVMXEXITHANDLER vmxHCExitVmcall;
219#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
220static FNVMXEXITHANDLER vmxHCExitVmclear;
221static FNVMXEXITHANDLER vmxHCExitVmlaunch;
222static FNVMXEXITHANDLER vmxHCExitVmptrld;
223static FNVMXEXITHANDLER vmxHCExitVmptrst;
224static FNVMXEXITHANDLER vmxHCExitVmread;
225static FNVMXEXITHANDLER vmxHCExitVmresume;
226static FNVMXEXITHANDLER vmxHCExitVmwrite;
227static FNVMXEXITHANDLER vmxHCExitVmxoff;
228static FNVMXEXITHANDLER vmxHCExitVmxon;
229static FNVMXEXITHANDLER vmxHCExitInvvpid;
230# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
231static FNVMXEXITHANDLER vmxHCExitInvept;
232# endif
233#endif
234static FNVMXEXITHANDLER vmxHCExitRdtsc;
235static FNVMXEXITHANDLER vmxHCExitMovCRx;
236static FNVMXEXITHANDLER vmxHCExitMovDRx;
237static FNVMXEXITHANDLER vmxHCExitIoInstr;
238static FNVMXEXITHANDLER vmxHCExitRdmsr;
239static FNVMXEXITHANDLER vmxHCExitWrmsr;
240static FNVMXEXITHANDLER vmxHCExitMwait;
241static FNVMXEXITHANDLER vmxHCExitMtf;
242static FNVMXEXITHANDLER vmxHCExitMonitor;
243static FNVMXEXITHANDLER vmxHCExitPause;
244static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
245static FNVMXEXITHANDLER vmxHCExitApicAccess;
246static FNVMXEXITHANDLER vmxHCExitEptViolation;
247static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
248static FNVMXEXITHANDLER vmxHCExitRdtscp;
249static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
250static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
251static FNVMXEXITHANDLER vmxHCExitXsetbv;
252static FNVMXEXITHANDLER vmxHCExitInvpcid;
253static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
254static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
255static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
256/** @} */
257
258#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
259/** @name Nested-guest VM-exit handler prototypes.
260 * @{
261 */
262static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
263static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
264static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
265static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
266static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
267static FNVMXEXITHANDLER vmxHCExitHltNested;
268static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
269static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
270static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
271static FNVMXEXITHANDLER vmxHCExitRdtscNested;
272static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
273static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
274static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
275static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
276static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
277static FNVMXEXITHANDLER vmxHCExitMwaitNested;
278static FNVMXEXITHANDLER vmxHCExitMtfNested;
279static FNVMXEXITHANDLER vmxHCExitMonitorNested;
280static FNVMXEXITHANDLER vmxHCExitPauseNested;
281static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
282static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
283static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
284static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
285static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
286static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
287static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
288static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
289static FNVMXEXITHANDLER vmxHCExitInstrNested;
290static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
291# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
292static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
293static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
294# endif
295/** @} */
296#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
297
298
299/*********************************************************************************************************************************
300* Global Variables *
301*********************************************************************************************************************************/
302#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
303/**
304 * Array of all VMCS fields.
305 * Any fields added to the VT-x spec. should be added here.
306 *
307 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
308 * of nested-guests.
309 */
310static const uint32_t g_aVmcsFields[] =
311{
312 /* 16-bit control fields. */
313 VMX_VMCS16_VPID,
314 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
315 VMX_VMCS16_EPTP_INDEX,
316
317 /* 16-bit guest-state fields. */
318 VMX_VMCS16_GUEST_ES_SEL,
319 VMX_VMCS16_GUEST_CS_SEL,
320 VMX_VMCS16_GUEST_SS_SEL,
321 VMX_VMCS16_GUEST_DS_SEL,
322 VMX_VMCS16_GUEST_FS_SEL,
323 VMX_VMCS16_GUEST_GS_SEL,
324 VMX_VMCS16_GUEST_LDTR_SEL,
325 VMX_VMCS16_GUEST_TR_SEL,
326 VMX_VMCS16_GUEST_INTR_STATUS,
327 VMX_VMCS16_GUEST_PML_INDEX,
328
329 /* 16-bit host-state fields. */
330 VMX_VMCS16_HOST_ES_SEL,
331 VMX_VMCS16_HOST_CS_SEL,
332 VMX_VMCS16_HOST_SS_SEL,
333 VMX_VMCS16_HOST_DS_SEL,
334 VMX_VMCS16_HOST_FS_SEL,
335 VMX_VMCS16_HOST_GS_SEL,
336 VMX_VMCS16_HOST_TR_SEL,
337
338 /* 64-bit control fields. */
339 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
340 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
341 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
342 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
343 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
344 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
345 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
346 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
347 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
348 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
349 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
350 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
351 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
352 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
353 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
354 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
355 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
356 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
357 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
358 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
359 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
360 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
361 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
362 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
363 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
364 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
365 VMX_VMCS64_CTRL_EPTP_FULL,
366 VMX_VMCS64_CTRL_EPTP_HIGH,
367 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
368 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
369 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
370 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
371 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
372 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
373 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
374 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
375 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
376 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
377 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
378 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
379 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
380 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
381 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
382 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
383 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
384 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
385 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
386 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
387 VMX_VMCS64_CTRL_SPPTP_FULL,
388 VMX_VMCS64_CTRL_SPPTP_HIGH,
389 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
390 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
391 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
392 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
393 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
394 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
395
396 /* 64-bit read-only data fields. */
397 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
398 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
399
400 /* 64-bit guest-state fields. */
401 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
402 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
403 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
404 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
405 VMX_VMCS64_GUEST_PAT_FULL,
406 VMX_VMCS64_GUEST_PAT_HIGH,
407 VMX_VMCS64_GUEST_EFER_FULL,
408 VMX_VMCS64_GUEST_EFER_HIGH,
409 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
410 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
411 VMX_VMCS64_GUEST_PDPTE0_FULL,
412 VMX_VMCS64_GUEST_PDPTE0_HIGH,
413 VMX_VMCS64_GUEST_PDPTE1_FULL,
414 VMX_VMCS64_GUEST_PDPTE1_HIGH,
415 VMX_VMCS64_GUEST_PDPTE2_FULL,
416 VMX_VMCS64_GUEST_PDPTE2_HIGH,
417 VMX_VMCS64_GUEST_PDPTE3_FULL,
418 VMX_VMCS64_GUEST_PDPTE3_HIGH,
419 VMX_VMCS64_GUEST_BNDCFGS_FULL,
420 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
421 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
422 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
423 VMX_VMCS64_GUEST_PKRS_FULL,
424 VMX_VMCS64_GUEST_PKRS_HIGH,
425
426 /* 64-bit host-state fields. */
427 VMX_VMCS64_HOST_PAT_FULL,
428 VMX_VMCS64_HOST_PAT_HIGH,
429 VMX_VMCS64_HOST_EFER_FULL,
430 VMX_VMCS64_HOST_EFER_HIGH,
431 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
432 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
433 VMX_VMCS64_HOST_PKRS_FULL,
434 VMX_VMCS64_HOST_PKRS_HIGH,
435
436 /* 32-bit control fields. */
437 VMX_VMCS32_CTRL_PIN_EXEC,
438 VMX_VMCS32_CTRL_PROC_EXEC,
439 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
440 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
441 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
442 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
443 VMX_VMCS32_CTRL_EXIT,
444 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
445 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
446 VMX_VMCS32_CTRL_ENTRY,
447 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
448 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
449 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
450 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
451 VMX_VMCS32_CTRL_TPR_THRESHOLD,
452 VMX_VMCS32_CTRL_PROC_EXEC2,
453 VMX_VMCS32_CTRL_PLE_GAP,
454 VMX_VMCS32_CTRL_PLE_WINDOW,
455
456 /* 32-bit read-only fields. */
457 VMX_VMCS32_RO_VM_INSTR_ERROR,
458 VMX_VMCS32_RO_EXIT_REASON,
459 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
460 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
461 VMX_VMCS32_RO_IDT_VECTORING_INFO,
462 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
463 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
464 VMX_VMCS32_RO_EXIT_INSTR_INFO,
465
466 /* 32-bit guest-state fields. */
467 VMX_VMCS32_GUEST_ES_LIMIT,
468 VMX_VMCS32_GUEST_CS_LIMIT,
469 VMX_VMCS32_GUEST_SS_LIMIT,
470 VMX_VMCS32_GUEST_DS_LIMIT,
471 VMX_VMCS32_GUEST_FS_LIMIT,
472 VMX_VMCS32_GUEST_GS_LIMIT,
473 VMX_VMCS32_GUEST_LDTR_LIMIT,
474 VMX_VMCS32_GUEST_TR_LIMIT,
475 VMX_VMCS32_GUEST_GDTR_LIMIT,
476 VMX_VMCS32_GUEST_IDTR_LIMIT,
477 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
478 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
479 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
480 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
481 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
482 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
483 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
484 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
485 VMX_VMCS32_GUEST_INT_STATE,
486 VMX_VMCS32_GUEST_ACTIVITY_STATE,
487 VMX_VMCS32_GUEST_SMBASE,
488 VMX_VMCS32_GUEST_SYSENTER_CS,
489 VMX_VMCS32_PREEMPT_TIMER_VALUE,
490
491 /* 32-bit host-state fields. */
492 VMX_VMCS32_HOST_SYSENTER_CS,
493
494 /* Natural-width control fields. */
495 VMX_VMCS_CTRL_CR0_MASK,
496 VMX_VMCS_CTRL_CR4_MASK,
497 VMX_VMCS_CTRL_CR0_READ_SHADOW,
498 VMX_VMCS_CTRL_CR4_READ_SHADOW,
499 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
500 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
501 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
502 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
503
504 /* Natural-width read-only data fields. */
505 VMX_VMCS_RO_EXIT_QUALIFICATION,
506 VMX_VMCS_RO_IO_RCX,
507 VMX_VMCS_RO_IO_RSI,
508 VMX_VMCS_RO_IO_RDI,
509 VMX_VMCS_RO_IO_RIP,
510 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
511
512 /* Natural-width guest-state fields. */
513 VMX_VMCS_GUEST_CR0,
514 VMX_VMCS_GUEST_CR3,
515 VMX_VMCS_GUEST_CR4,
516 VMX_VMCS_GUEST_ES_BASE,
517 VMX_VMCS_GUEST_CS_BASE,
518 VMX_VMCS_GUEST_SS_BASE,
519 VMX_VMCS_GUEST_DS_BASE,
520 VMX_VMCS_GUEST_FS_BASE,
521 VMX_VMCS_GUEST_GS_BASE,
522 VMX_VMCS_GUEST_LDTR_BASE,
523 VMX_VMCS_GUEST_TR_BASE,
524 VMX_VMCS_GUEST_GDTR_BASE,
525 VMX_VMCS_GUEST_IDTR_BASE,
526 VMX_VMCS_GUEST_DR7,
527 VMX_VMCS_GUEST_RSP,
528 VMX_VMCS_GUEST_RIP,
529 VMX_VMCS_GUEST_RFLAGS,
530 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
531 VMX_VMCS_GUEST_SYSENTER_ESP,
532 VMX_VMCS_GUEST_SYSENTER_EIP,
533 VMX_VMCS_GUEST_S_CET,
534 VMX_VMCS_GUEST_SSP,
535 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
536
537 /* Natural-width host-state fields. */
538 VMX_VMCS_HOST_CR0,
539 VMX_VMCS_HOST_CR3,
540 VMX_VMCS_HOST_CR4,
541 VMX_VMCS_HOST_FS_BASE,
542 VMX_VMCS_HOST_GS_BASE,
543 VMX_VMCS_HOST_TR_BASE,
544 VMX_VMCS_HOST_GDTR_BASE,
545 VMX_VMCS_HOST_IDTR_BASE,
546 VMX_VMCS_HOST_SYSENTER_ESP,
547 VMX_VMCS_HOST_SYSENTER_EIP,
548 VMX_VMCS_HOST_RSP,
549 VMX_VMCS_HOST_RIP,
550 VMX_VMCS_HOST_S_CET,
551 VMX_VMCS_HOST_SSP,
552 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
553};
554#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
555
556#ifdef VBOX_STRICT
557static const uint32_t g_aVmcsSegBase[] =
558{
559 VMX_VMCS_GUEST_ES_BASE,
560 VMX_VMCS_GUEST_CS_BASE,
561 VMX_VMCS_GUEST_SS_BASE,
562 VMX_VMCS_GUEST_DS_BASE,
563 VMX_VMCS_GUEST_FS_BASE,
564 VMX_VMCS_GUEST_GS_BASE
565};
566static const uint32_t g_aVmcsSegSel[] =
567{
568 VMX_VMCS16_GUEST_ES_SEL,
569 VMX_VMCS16_GUEST_CS_SEL,
570 VMX_VMCS16_GUEST_SS_SEL,
571 VMX_VMCS16_GUEST_DS_SEL,
572 VMX_VMCS16_GUEST_FS_SEL,
573 VMX_VMCS16_GUEST_GS_SEL
574};
575static const uint32_t g_aVmcsSegLimit[] =
576{
577 VMX_VMCS32_GUEST_ES_LIMIT,
578 VMX_VMCS32_GUEST_CS_LIMIT,
579 VMX_VMCS32_GUEST_SS_LIMIT,
580 VMX_VMCS32_GUEST_DS_LIMIT,
581 VMX_VMCS32_GUEST_FS_LIMIT,
582 VMX_VMCS32_GUEST_GS_LIMIT
583};
584static const uint32_t g_aVmcsSegAttr[] =
585{
586 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
587 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
588 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
589 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
590 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
591 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS
592};
593AssertCompile(RT_ELEMENTS(g_aVmcsSegSel) == X86_SREG_COUNT);
594AssertCompile(RT_ELEMENTS(g_aVmcsSegLimit) == X86_SREG_COUNT);
595AssertCompile(RT_ELEMENTS(g_aVmcsSegBase) == X86_SREG_COUNT);
596AssertCompile(RT_ELEMENTS(g_aVmcsSegAttr) == X86_SREG_COUNT);
597#endif /* VBOX_STRICT */
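/*
 * Illustrative sketch (hypothetical, strict builds only): the arrays above are
 * indexed by the X86_SREG_XXX ordinals, e.g. reading the guest CS limit:
 *
 *     uint32_t u32Limit;
 *     int rc = VMX_VMCS_READ_32(pVCpu, g_aVmcsSegLimit[X86_SREG_CS], &u32Limit);
 *     AssertRC(rc);
 */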
598
599#ifdef HMVMX_USE_FUNCTION_TABLE
600/**
601 * VMX_EXIT dispatch table.
602 */
603static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
604{
605 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
606 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
607 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
608 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
609 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
610 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
611 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
612 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
613 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
614 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
615 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
616 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
617 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
618 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
619 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
620 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
621 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
622 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
623 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
624#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
625 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
626 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
627 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
628 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
629 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
630 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
631 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
632 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
633 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
634#else
635 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
636 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
637 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
638 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
639 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
640 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
641 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
642 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
643 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
644#endif
645 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
646 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
647 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
648 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
649 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
650 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
651 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
652 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
653 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
654 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
655 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
656 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
657 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
658 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
659 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
660 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
661 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
662 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
663 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
664 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
665 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
666 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
667#if defined(VBOX_WITH_NESTED_HWVIRT_VMX) && defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT)
668 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
669#else
670 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
671#endif
672 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
673 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
674#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
675 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
676#else
677 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
678#endif
679 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
680 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
681 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
682 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
683 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
684 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
685 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
686 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
687 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
688 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
689 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
690 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
691 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
692 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
693 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
694 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
695};
696#endif /* HMVMX_USE_FUNCTION_TABLE */
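/*
 * Illustrative dispatch sketch (hypothetical; the actual dispatcher appears later
 * in this file and this sketch assumes pVmxTransient->uExitReason holds the exit
 * reason): with the function table enabled, an in-range exit reason simply
 * indexes the table:
 *
 *     uint32_t const uExitReason = pVmxTransient->uExitReason;
 *     if (uExitReason <= VMX_EXIT_MAX)
 *         return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
 *     return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
 */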
697
698#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
699static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
700{
701 /* 0 */ "(Not Used)",
702 /* 1 */ "VMCALL executed in VMX root operation.",
703 /* 2 */ "VMCLEAR with invalid physical address.",
704 /* 3 */ "VMCLEAR with VMXON pointer.",
705 /* 4 */ "VMLAUNCH with non-clear VMCS.",
706 /* 5 */ "VMRESUME with non-launched VMCS.",
707 /* 6 */ "VMRESUME after VMXOFF",
708 /* 7 */ "VM-entry with invalid control fields.",
709 /* 8 */ "VM-entry with invalid host state fields.",
710 /* 9 */ "VMPTRLD with invalid physical address.",
711 /* 10 */ "VMPTRLD with VMXON pointer.",
712 /* 11 */ "VMPTRLD with incorrect revision identifier.",
713 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
714 /* 13 */ "VMWRITE to read-only VMCS component.",
715 /* 14 */ "(Not Used)",
716 /* 15 */ "VMXON executed in VMX root operation.",
717 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
718 /* 17 */ "VM-entry with non-launched executive VMCS.",
719 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
720 /* 19 */ "VMCALL with non-clear VMCS.",
721 /* 20 */ "VMCALL with invalid VM-exit control fields.",
722 /* 21 */ "(Not Used)",
723 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
724 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
725 /* 24 */ "VMCALL with invalid SMM-monitor features.",
726 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
727 /* 26 */ "VM-entry with events blocked by MOV SS.",
728 /* 27 */ "(Not Used)",
729 /* 28 */ "Invalid operand to INVEPT/INVVPID."
730};
731#endif /* VBOX_STRICT && LOG_ENABLED */
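/*
 * Illustrative lookup sketch (hypothetical): a VM-instruction error read from
 * VMX_VMCS32_RO_VM_INSTR_ERROR can be mapped to one of the strings above for
 * logging, clamping to HMVMX_INSTR_ERROR_MAX to stay inside the table:
 *
 *     uint32_t uInstrError;
 *     int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_VM_INSTR_ERROR, &uInstrError);
 *     AssertRC(rc);
 *     Log4(("VM-instruction error %u: %s\n", uInstrError,
 *           uInstrError <= HMVMX_INSTR_ERROR_MAX ? g_apszVmxInstrErrors[uInstrError] : "unknown"));
 */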
732
733
734/**
735 * Gets the CR0 guest/host mask.
736 *
737 * These bits typically do not change through the lifetime of a VM. Any bit set in
738 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
739 * by the guest.
740 *
741 * @returns The CR0 guest/host mask.
742 * @param pVCpu The cross context virtual CPU structure.
743 */
744static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
745{
746 /*
747 * Modifications by the guest to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW)
748 * and to CR0 bits that we require for shadow paging (PG) must cause VM-exits.
749 *
750 * Furthermore, modifications to any bits that are reserved/unspecified currently
751 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
752 * when future CPUs specify and use currently reserved/unspecified bits.
753 */
754 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
755 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
756 * and @bugref{6944}. */
757 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
758 return ( X86_CR0_PE
759 | X86_CR0_NE
760 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
761 | X86_CR0_PG
762 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
763}
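/*
 * Illustrative sketch (hypothetical; the real export code lives elsewhere in this
 * file, and this assumes a natural-width write macro VMX_VMCS_WRITE_NW analogous
 * to the read side): the fixed mask is what ends up in the CR0 guest/host mask
 * field so that guest writes to any of these bits cause a VM-exit:
 *
 *     uint64_t const fCr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
 *     int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);
 *     AssertRC(rc);
 */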
764
765
766/**
767 * Gets the CR4 guest/host mask.
768 *
769 * These bits typically do not change through the lifetime of a VM. Any bit set in
770 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
771 * by the guest.
772 *
773 * @returns The CR4 guest/host mask.
774 * @param pVCpu The cross context virtual CPU structure.
775 */
776static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
777{
778 /*
779 * We construct a mask of all CR4 bits that the guest can modify without causing
780 * a VM-exit. We then invert this mask to obtain all CR4 bits that should cause
781 * a VM-exit when the guest attempts to modify them while executing using
782 * hardware-assisted VMX.
783 *
784 * When a feature is not exposed to the guest (and may be present on the host),
785 * we want to intercept guest modifications to the bit so we can emulate proper
786 * behavior (e.g., #GP).
787 *
788 * Furthermore, only modifications to those bits that don't require immediate
789 * emulation are allowed. For example, PCIDE is excluded because the behavior
790 * depends on CR3 which might not always be the guest value while executing
791 * using hardware-assisted VMX.
792 */
793 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
794 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
795 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
796 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
797
798 /*
799 * Paranoia.
800 * Ensure features exposed to the guest are present on the host.
801 */
802 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
803 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
804 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
805
806 uint64_t const fGstMask = ( X86_CR4_PVI
807 | X86_CR4_TSD
808 | X86_CR4_DE
809 | X86_CR4_MCE
810 | X86_CR4_PCE
811 | X86_CR4_OSXMMEEXCPT
812 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
813 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
814 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0));
815 return ~fGstMask;
816}
817
818
819/**
820 * Adds one or more exceptions to the exception bitmap and commits it to the current
821 * VMCS.
822 *
823 * @param pVCpu The cross context virtual CPU structure.
824 * @param pVmxTransient The VMX-transient structure.
825 * @param uXcptMask The exception(s) to add.
826 */
827static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
828{
829 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
830 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
831 if ((uXcptBitmap & uXcptMask) != uXcptMask)
832 {
833 uXcptBitmap |= uXcptMask;
834 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
835 AssertRC(rc);
836 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
837 }
838}
839
840
841/**
842 * Adds an exception to the exception bitmap and commits it to the current VMCS.
843 *
844 * @param pVCpu The cross context virtual CPU structure.
845 * @param pVmxTransient The VMX-transient structure.
846 * @param uXcpt The exception to add.
847 */
848static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
849{
850 Assert(uXcpt <= X86_XCPT_LAST);
851 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
852}
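/*
 * Illustrative usage (hypothetical): to start intercepting guest #GP faults, for
 * instance while debugging, a caller would simply do:
 *
 *     vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 */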
853
854
855/**
856 * Remove one or more exceptions from the exception bitmap and commits it to the
857 * current VMCS.
858 *
859 * This takes care of not removing the exception intercept if a nested-guest
860 * requires the exception to be intercepted.
861 *
862 * @returns VBox status code.
863 * @param pVCpu The cross context virtual CPU structure.
864 * @param pVmxTransient The VMX-transient structure.
865 * @param uXcptMask The exception(s) to remove.
866 */
867static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
868{
869 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
870 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
871 if (u32XcptBitmap & uXcptMask)
872 {
873#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
874 if (!pVmxTransient->fIsNestedGuest)
875 { /* likely */ }
876 else
877 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
878#endif
879#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
880 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
881 | RT_BIT(X86_XCPT_DE)
882 | RT_BIT(X86_XCPT_NM)
883 | RT_BIT(X86_XCPT_TS)
884 | RT_BIT(X86_XCPT_UD)
885 | RT_BIT(X86_XCPT_NP)
886 | RT_BIT(X86_XCPT_SS)
887 | RT_BIT(X86_XCPT_GP)
888 | RT_BIT(X86_XCPT_PF)
889 | RT_BIT(X86_XCPT_MF));
890#elif defined(HMVMX_ALWAYS_TRAP_PF)
891 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
892#endif
893 if (uXcptMask)
894 {
895 /* Validate we are not removing any essential exception intercepts. */
896#ifndef IN_NEM_DARWIN
897 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
898#else
899 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
900#endif
901 NOREF(pVCpu);
902 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
903 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
904
905 /* Remove it from the exception bitmap. */
906 u32XcptBitmap &= ~uXcptMask;
907
908 /* Commit and update the cache if necessary. */
909 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
910 {
911 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
912 AssertRC(rc);
913 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
914 }
915 }
916 }
917 return VINF_SUCCESS;
918}
919
920
921/**
922 * Removes an exception from the exception bitmap and commits it to the current
923 * VMCS.
924 *
925 * @returns VBox status code.
926 * @param pVCpu The cross context virtual CPU structure.
927 * @param pVmxTransient The VMX-transient structure.
928 * @param uXcpt The exception to remove.
929 */
930static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
931{
932 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
933}
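/*
 * Illustrative usage (hypothetical): dropping the #GP intercept again; the helper
 * above keeps the intercept if the nested-guest still wants it or if it is one of
 * the essential intercepts:
 *
 *     int rc = vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 *     AssertRC(rc);
 */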
934
935
936#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
937/**
938 * Loads the shadow VMCS specified by the VMCS info. object.
939 *
940 * @returns VBox status code.
941 * @param pVmcsInfo The VMCS info. object.
942 *
943 * @remarks Can be called with interrupts disabled.
944 */
945static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
946{
947 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
948 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
949
950 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
951 if (RT_SUCCESS(rc))
952 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
953 return rc;
954}
955
956
957/**
958 * Clears the shadow VMCS specified by the VMCS info. object.
959 *
960 * @returns VBox status code.
961 * @param pVmcsInfo The VMCS info. object.
962 *
963 * @remarks Can be called with interrupts disabled.
964 */
965static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
966{
967 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
968 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
969
970 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
971 if (RT_SUCCESS(rc))
972 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
973 return rc;
974}
975
976
977/**
978 * Switches from and to the specified VMCSes.
979 *
980 * @returns VBox status code.
981 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
982 * @param pVmcsInfoTo The VMCS info. object we are switching to.
983 *
984 * @remarks Called with interrupts disabled.
985 */
986static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
987{
988 /*
989 * Clear the VMCS we are switching out if it has not already been cleared.
990 * This will sync any CPU internal data back to the VMCS.
991 */
992 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
993 {
994 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
995 if (RT_SUCCESS(rc))
996 {
997 /*
998 * The shadow VMCS, if any, would not be active at this point since we
999 * would have cleared it while importing the virtual hardware-virtualization
1000 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
1001 * clear the shadow VMCS here, just assert for safety.
1002 */
1003 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
1004 }
1005 else
1006 return rc;
1007 }
1008
1009 /*
1010 * Clear the VMCS we are switching to if it has not already been cleared.
1011 * This will initialize the VMCS launch state to "clear" required for loading it.
1012 *
1013 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1014 */
1015 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1016 {
1017 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1018 if (RT_SUCCESS(rc))
1019 { /* likely */ }
1020 else
1021 return rc;
1022 }
1023
1024 /*
1025 * Finally, load the VMCS we are switching to.
1026 */
1027 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1028}
1029
1030
1031/**
1032 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1033 * caller.
1034 *
1035 * @returns VBox status code.
1036 * @param pVCpu The cross context virtual CPU structure.
1037 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1038 * true) or guest VMCS (pass false).
1039 */
1040static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1041{
1042 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1043 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1044
1045 PVMXVMCSINFO pVmcsInfoFrom;
1046 PVMXVMCSINFO pVmcsInfoTo;
1047 if (fSwitchToNstGstVmcs)
1048 {
1049 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1050 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1051 }
1052 else
1053 {
1054 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1055 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1056 }
1057
1058 /*
1059 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1060 * preemption hook code path acquires the current VMCS.
1061 */
1062 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1063
1064 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1065 if (RT_SUCCESS(rc))
1066 {
1067 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1068 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1069
1070 /*
1071 * If we are switching to a VMCS that was executed on a different host CPU or was
1072 * never executed before, flag that we need to export the host state before executing
1073 * guest/nested-guest code using hardware-assisted VMX.
1074 *
1075 * This could probably be done in a preemptible context since the preemption hook
1076 * will flag the necessary change in host context. However, since preemption is
1077 * already disabled and to avoid making assumptions about host specific code in
1078 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1079 * disabled.
1080 */
1081 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1082 { /* likely */ }
1083 else
1084 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1085
1086 ASMSetFlags(fEFlags);
1087
1088 /*
1089 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1090 * flag that we need to update the host MSR values there. Even if we decide in the
1091 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1092 * if its content differs, we would have to update the host MSRs anyway.
1093 */
1094 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1095 }
1096 else
1097 ASMSetFlags(fEFlags);
1098 return rc;
1099}
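/*
 * Illustrative usage (hypothetical): switching to the nested-guest VMCS before
 * running nested-guest code and back to the ordinary guest VMCS afterwards:
 *
 *     int rc = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, true);    // nested-guest VMCS
 *     ...
 *     rc = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, false);       // ordinary guest VMCS
 */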
1100#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1101
1102
1103#ifdef VBOX_STRICT
1104/**
1105 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1106 * transient structure.
1107 *
1108 * @param pVCpu The cross context virtual CPU structure.
1109 * @param pVmxTransient The VMX-transient structure.
1110 */
1111DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1112{
1113 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1114 AssertRC(rc);
1115}
1116
1117
1118/**
1119 * Reads the VM-entry exception error code field from the VMCS into
1120 * the VMX transient structure.
1121 *
1122 * @param pVCpu The cross context virtual CPU structure.
1123 * @param pVmxTransient The VMX-transient structure.
1124 */
1125DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1126{
1127 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1128 AssertRC(rc);
1129}
1130
1131
1132/**
1133 * Reads the VM-entry instruction length field from the VMCS into the VMX
1134 * transient structure.
1135 *
1136 * @param pVCpu The cross context virtual CPU structure.
1137 * @param pVmxTransient The VMX-transient structure.
1138 */
1139DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1140{
1141 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1142 AssertRC(rc);
1143}
1144#endif /* VBOX_STRICT */
1145
1146
1147/**
1148 * Reads the VM-exit interruption-information field from the VMCS into the VMX
1149 * transient structure.
1150 *
1151 * @param pVCpu The cross context virtual CPU structure.
1152 * @param pVmxTransient The VMX-transient structure.
1153 */
1154DECLINLINE(void) vmxHCReadExitIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1155{
1156 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1157 {
1158 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1159 AssertRC(rc);
1160 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO;
1161 }
1162}
1163
1164
1165/**
1166 * Reads the VM-exit interruption error code from the VMCS into the VMX
1167 * transient structure.
1168 *
1169 * @param pVCpu The cross context virtual CPU structure.
1170 * @param pVmxTransient The VMX-transient structure.
1171 */
1172DECLINLINE(void) vmxHCReadExitIntErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1173{
1174 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1175 {
1176 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1177 AssertRC(rc);
1178 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE;
1179 }
1180}
1181
1182
1183/**
1184 * Reads the VM-exit instruction length field from the VMCS into the VMX
1185 * transient structure.
1186 *
1187 * @param pVCpu The cross context virtual CPU structure.
1188 * @param pVmxTransient The VMX-transient structure.
1189 */
1190DECLINLINE(void) vmxHCReadExitInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1191{
1192 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1193 {
1194 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1195 AssertRC(rc);
1196 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
1197 }
1198}
1199
1200
1201/**
1202 * Reads the VM-exit instruction-information field from the VMCS into
1203 * the VMX transient structure.
1204 *
1205 * @param pVCpu The cross context virtual CPU structure.
1206 * @param pVmxTransient The VMX-transient structure.
1207 */
1208DECLINLINE(void) vmxHCReadExitInstrInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1209{
1210 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1211 {
1212 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1213 AssertRC(rc);
1214 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO;
1215 }
1216}
1217
1218
1219/**
1220 * Reads the Exit Qualification from the VMCS into the VMX transient structure.
1221 *
1222 * @param pVCpu The cross context virtual CPU structure.
1223 * @param pVmxTransient The VMX-transient structure.
1224 */
1225DECLINLINE(void) vmxHCReadExitQualVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1226{
1227 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1228 {
1229 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1230 AssertRC(rc);
1231 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
1232 }
1233}
1234
1235
1236/**
1237 * Reads the Guest-linear address from the VMCS into the VMX transient structure.
1238 *
1239 * @param pVCpu The cross context virtual CPU structure.
1240 * @param pVmxTransient The VMX-transient structure.
1241 */
1242DECLINLINE(void) vmxHCReadGuestLinearAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1243{
1244 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1245 {
1246 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1247 AssertRC(rc);
1248 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_LINEAR_ADDR;
1249 }
1250}
1251
1252
1253/**
1254 * Reads the Guest-physical address from the VMCS into the VMX transient structure.
1255 *
1256 * @param pVCpu The cross context virtual CPU structure.
1257 * @param pVmxTransient The VMX-transient structure.
1258 */
1259DECLINLINE(void) vmxHCReadGuestPhysicalAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1260{
1261 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1262 {
1263 int rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1264 AssertRC(rc);
1265 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PHYSICAL_ADDR;
1266 }
1267}
1268
1269#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1270/**
1271 * Reads the Guest pending-debug exceptions from the VMCS into the VMX transient
1272 * structure.
1273 *
1274 * @param pVCpu The cross context virtual CPU structure.
1275 * @param pVmxTransient The VMX-transient structure.
1276 */
1277DECLINLINE(void) vmxHCReadGuestPendingDbgXctps(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1278{
1279 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1280 {
1281 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1282 AssertRC(rc);
1283 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PENDING_DBG_XCPTS;
1284 }
1285}
1286#endif
1287
1288/**
1289 * Reads the IDT-vectoring information field from the VMCS into the VMX
1290 * transient structure.
1291 *
1292 * @param pVCpu The cross context virtual CPU structure.
1293 * @param pVmxTransient The VMX-transient structure.
1294 *
1295 * @remarks No-long-jump zone!!!
1296 */
1297DECLINLINE(void) vmxHCReadIdtVectoringInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1298{
1299 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1300 {
1301 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1302 AssertRC(rc);
1303 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO;
1304 }
1305}
1306
1307
1308/**
1309 * Reads the IDT-vectoring error code from the VMCS into the VMX
1310 * transient structure.
1311 *
1312 * @param pVCpu The cross context virtual CPU structure.
1313 * @param pVmxTransient The VMX-transient structure.
1314 */
1315DECLINLINE(void) vmxHCReadIdtVectoringErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1316{
1317 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1318 {
1319 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1320 AssertRC(rc);
1321 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE;
1322 }
1323}
1324
1325#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1326/**
1327 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1328 *
1329 * @param pVCpu The cross context virtual CPU structure.
1330 * @param pVmxTransient The VMX-transient structure.
1331 */
1332static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1333{
1334 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1335 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1336 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1337 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1338 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1339 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1340 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1341 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1342 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1343 AssertRC(rc);
1344 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1345 | HMVMX_READ_EXIT_INSTR_LEN
1346 | HMVMX_READ_EXIT_INSTR_INFO
1347 | HMVMX_READ_IDT_VECTORING_INFO
1348 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1349 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1350 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1351 | HMVMX_READ_GUEST_LINEAR_ADDR
1352 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1353}
1354#endif
1355
1356/**
1357 * Verifies that our cached values of the VMCS fields are all consistent with
1358 * what's actually present in the VMCS.
1359 *
1360 * @returns VBox status code.
1361 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1362 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1363 * VMCS content. HMCPU error-field is
1364 * updated, see VMX_VCI_XXX.
1365 * @param pVCpu The cross context virtual CPU structure.
1366 * @param pVmcsInfo The VMCS info. object.
1367 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1368 */
1369static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1370{
1371 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1372
1373 uint32_t u32Val;
1374 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1375 AssertRC(rc);
1376 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1377 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1378 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1379 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1380
1381 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1382 AssertRC(rc);
1383 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1384 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1385 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1386 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1387
1388 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1389 AssertRC(rc);
1390 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1391 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1392 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1393 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1394
1395 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1396 AssertRC(rc);
1397 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1398 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1399 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1400 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1401
1402 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1403 {
1404 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1405 AssertRC(rc);
1406 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1407 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1408 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1409 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1410 }
1411
1412 uint64_t u64Val;
1413 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1414 {
1415 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1416 AssertRC(rc);
1417 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1418 ("%s proc3 controls mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1419 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1420 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1421 }
1422
1423 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1424 AssertRC(rc);
1425 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1426 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1427 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1428 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1429
1430 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1431 AssertRC(rc);
1432 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1433 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1434 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1435 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1436
1437 NOREF(pcszVmcs);
1438 return VINF_SUCCESS;
1439}
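/*
 * Illustrative usage (hypothetical, strict/debug paths): verifying the cache for
 * the currently active guest VMCS and bailing out on a mismatch:
 *
 *     int rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, false);   // not a nested-guest VMCS
 *     AssertRCReturn(rc, rc);
 */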
1440
1441
1442/**
1443 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1444 * VMCS.
1445 *
1446 * This is typically required when the guest changes paging mode.
1447 *
1448 * @returns VBox status code.
1449 * @param pVCpu The cross context virtual CPU structure.
1450 * @param pVmxTransient The VMX-transient structure.
1451 *
1452 * @remarks Requires EFER.
1453 * @remarks No-long-jump zone!!!
1454 */
1455static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1456{
1457 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1458 {
1459 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1460 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1461
1462 /*
1463 * VM-entry controls.
1464 */
1465 {
1466 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1467 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1468
1469 /*
1470 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1471 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1472 *
1473 * For nested-guests, this is a mandatory VM-entry control. It's also
1474 * required because we do not want to leak host bits to the nested-guest.
1475 */
1476 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1477
1478 /*
1479 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1480 *
1481 * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1482 * required to get the nested-guest working with hardware-assisted VMX execution.
1483 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1484 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1485 * here rather than while merging the guest VMCS controls.
1486 */
1487 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1488 {
1489 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1490 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1491 }
1492 else
1493 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1494
1495 /*
1496 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1497 *
1498 * For nested-guests, we use the "load IA32_EFER" control if the hardware supports it,
1499 * regardless of whether the nested-guest VMCS specifies it because we are free to
1500 * load whatever MSRs we require and we do not need to modify the guest visible copy
1501 * of the VM-entry MSR load area.
1502 */
1503 if ( g_fHmVmxSupportsVmcsEfer
1504#ifndef IN_NEM_DARWIN
1505 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1506#endif
1507 )
1508 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1509 else
1510 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1511
1512 /*
1513 * The following should -not- be set (since we're not in SMM mode):
1514 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1515 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1516 */
1517
1518 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1519 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1520
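/*
 * Note: consistency check against the capability MSR pair. Every bit demanded by
 * allowed0 is already in fVal, and (fVal & fZap) == fVal verifies that no bit
 * outside allowed1 has been requested. E.g. (made-up capability values) with
 * allowed1 = 0x0000ffff, requesting any control bit above bit 15 would make the
 * masked value differ from fVal and we would fail below.
 */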
1521 if ((fVal & fZap) == fVal)
1522 { /* likely */ }
1523 else
1524 {
1525 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1526 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1527 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1528 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1529 }
1530
1531 /* Commit it to the VMCS. */
1532 if (pVmcsInfo->u32EntryCtls != fVal)
1533 {
1534 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1535 AssertRC(rc);
1536 pVmcsInfo->u32EntryCtls = fVal;
1537 }
1538 }
1539
1540 /*
1541 * VM-exit controls.
1542 */
1543 {
1544 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1545 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1546
1547 /*
1548 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1549 * supported the 1-setting of this bit.
1550 *
1551 * For nested-guests, we set the "save debug controls" control since the converse
1552 * "load debug controls" control is mandatory for nested-guests anyway.
1553 */
1554 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1555
1556 /*
1557 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1558 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1559 * host EFER.LMA and EFER.LME bits to this value. See the assertion in
1560 * vmxHCExportHostMsrs().
1561 *
1562 * For nested-guests, we always set this bit as we do not support 32-bit
1563 * hosts.
1564 */
1565 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1566
1567#ifndef IN_NEM_DARWIN
1568 /*
1569 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1570 *
1571 * For nested-guests, we should use the "save IA32_EFER" control if we also
1572 * used the "load IA32_EFER" control while exporting VM-entry controls.
1573 */
1574 if ( g_fHmVmxSupportsVmcsEfer
1575 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1576 {
1577 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1578 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1579 }
1580#endif
1581
1582 /*
1583 * Enable saving of the VMX-preemption timer value on VM-exit.
1584 * For nested-guests, currently not exposed/used.
1585 */
1586 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1587 * the timer value. */
1588 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1589 {
1590 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1591 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1592 }
1593
1594 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1595 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1596
1597 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1598 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1599 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1600
1601 if ((fVal & fZap) == fVal)
1602 { /* likely */ }
1603 else
1604 {
1605 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1606 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1607 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1608 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1609 }
1610
1611 /* Commit it to the VMCS. */
1612 if (pVmcsInfo->u32ExitCtls != fVal)
1613 {
1614 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1615 AssertRC(rc);
1616 pVmcsInfo->u32ExitCtls = fVal;
1617 }
1618 }
1619
1620 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1621 }
1622 return VINF_SUCCESS;
1623}
1624
1625
1626/**
1627 * Sets the TPR threshold in the VMCS.
1628 *
1629 * @param pVCpu The cross context virtual CPU structure.
1630 * @param pVmcsInfo The VMCS info. object.
1631 * @param u32TprThreshold The TPR threshold (task-priority class only).
1632 */
1633DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1634{
1635 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1636 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1637 RT_NOREF(pVmcsInfo);
1638 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1639 AssertRC(rc);
1640}
1641
1642
1643/**
1644 * Exports the guest APIC TPR state into the VMCS.
1645 *
1646 * @param pVCpu The cross context virtual CPU structure.
1647 * @param pVmxTransient The VMX-transient structure.
1648 *
1649 * @remarks No-long-jump zone!!!
1650 */
1651static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1652{
1653 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1654 {
1655 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1656
1657 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1658 if (!pVmxTransient->fIsNestedGuest)
1659 {
1660 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1661 && APICIsEnabled(pVCpu))
1662 {
1663 /*
1664 * Setup TPR shadowing.
1665 */
1666 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1667 {
1668 bool fPendingIntr = false;
1669 uint8_t u8Tpr = 0;
1670 uint8_t u8PendingIntr = 0;
1671 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1672 AssertRC(rc);
1673
1674 /*
1675 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1676 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1677 * priority of the pending interrupt so we can deliver the interrupt. If there
1678 * are no interrupts pending, set threshold to 0 to not cause any
1679 * TPR-below-threshold VM-exits.
1680 */
1681 uint32_t u32TprThreshold = 0;
1682 if (fPendingIntr)
1683 {
1684 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1685 (which is the Task-Priority Class). */
1686 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1687 const uint8_t u8TprPriority = u8Tpr >> 4;
1688 if (u8PendingPriority <= u8TprPriority)
1689 u32TprThreshold = u8PendingPriority;
1690 }
1691
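/*
 * Worked example: a pending vector 0x51 (priority class 5) while the guest TPR is
 * 0x60 (class 6) yields a threshold of 5; once the guest lowers its TPR below 0x50
 * the class-5 interrupt is no longer masked by the TPR and VT-x signals a
 * TPR-below-threshold VM-exit so we get a chance to inject it.
 */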
1692 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1693 }
1694 }
1695 }
1696 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1697 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1698 }
1699}
1700
1701
1702/**
1703 * Gets the guest interruptibility-state and updates related force-flags.
1704 *
1705 * @returns Guest's interruptibility-state.
1706 * @param pVCpu The cross context virtual CPU structure.
1707 *
1708 * @remarks No-long-jump zone!!!
1709 */
1710static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1711{
1712 /*
1713 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1714 */
1715 uint32_t fIntrState = 0;
1716 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1717 {
1718 /* If inhibition is active, RIP and RFLAGS should've been imported from the VMCS already. */
1719 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
1720
1721 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1722 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
1723 {
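/* If IF is set here the inhibition is reported as block-by-STI, otherwise as
   block-by-MOV SS; VT-x only permits block-by-STI while IF=1 (see the strict-build
   assertion further down). */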
1724 if (pCtx->eflags.Bits.u1IF)
1725 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1726 else
1727 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1728 }
1729 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1730 {
1731 /*
1732 * We can clear the inhibit force flag as even if we go back to the recompiler
1733 * without executing guest code in VT-x, the flag's condition to be cleared is
1734 * met and thus the cleared state is correct.
1735 */
1736 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1737 }
1738 }
1739
1740 /*
1741 * Check if we should inhibit NMI delivery.
1742 */
1743 if (CPUMIsGuestNmiBlocking(pVCpu))
1744 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1745
1746 /*
1747 * Validate.
1748 */
1749#ifdef VBOX_STRICT
1750 /* We don't support block-by-SMI yet. */
1751 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1752
1753 /* Block-by-STI must not be set when interrupts are disabled. */
1754 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
1755 {
1756 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1757 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
1758 }
1759#endif
1760
1761 return fIntrState;
1762}
1763
1764
1765/**
1766 * Exports the exception intercepts required for guest execution in the VMCS.
1767 *
1768 * @param pVCpu The cross context virtual CPU structure.
1769 * @param pVmxTransient The VMX-transient structure.
1770 *
1771 * @remarks No-long-jump zone!!!
1772 */
1773static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1774{
1775 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1776 {
1777 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1778 if ( !pVmxTransient->fIsNestedGuest
1779 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1780 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1781 else
1782 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1783
1784 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1785 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1786 }
1787}
1788
1789
1790/**
1791 * Exports the guest's RIP into the guest-state area in the VMCS.
1792 *
1793 * @param pVCpu The cross context virtual CPU structure.
1794 *
1795 * @remarks No-long-jump zone!!!
1796 */
1797static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1798{
1799 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1800 {
1801 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1802
1803 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1804 AssertRC(rc);
1805
1806 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1807 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1808 }
1809}
1810
1811
1812/**
1813 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1814 *
1815 * @param pVCpu The cross context virtual CPU structure.
1816 * @param pVmxTransient The VMX-transient structure.
1817 *
1818 * @remarks No-long-jump zone!!!
1819 */
1820static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1821{
1822 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1823 {
1824 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1825
1826 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
1827 Let us assert it as such and use 32-bit VMWRITE. */
1828 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
1829 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
1830 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
1831 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
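/* X86_EFL_1 / X86_EFL_RA1_MASK is the reserved always-one flag (bit 1) and
   X86_EFL_LIVE_MASK covers the architecturally modifiable flag bits, so nothing
   else may be set at this point. */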
1832
1833#ifndef IN_NEM_DARWIN
1834 /*
1835 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1836 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1837 * can run the real-mode guest code under Virtual 8086 mode.
1838 */
1839 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1840 if (pVmcsInfo->RealMode.fRealOnV86Active)
1841 {
1842 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1843 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1844 Assert(!pVmxTransient->fIsNestedGuest);
1845 pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
1846 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
1847 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1848 }
1849#else
1850 RT_NOREF(pVmxTransient);
1851#endif
1852
1853 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
1854 AssertRC(rc);
1855
1856 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1857 Log4Func(("eflags=%#RX32\n", fEFlags.u32));
1858 }
1859}
1860
1861
1862#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1863/**
1864 * Copies the nested-guest VMCS to the shadow VMCS.
1865 *
1866 * @returns VBox status code.
1867 * @param pVCpu The cross context virtual CPU structure.
1868 * @param pVmcsInfo The VMCS info. object.
1869 *
1870 * @remarks No-long-jump zone!!!
1871 */
1872static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1873{
1874 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1875 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1876
1877 /*
1878 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1879 * current VMCS, as we may try saving guest lazy MSRs.
1880 *
1881 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1882 * calling the import VMCS code which is currently performing the guest MSR reads
1883 * (on 64-bit hosts), accessing the auto-load/store MSR area on 32-bit hosts,
1884 * and the rest of the VMX leave-session machinery.
1885 */
1886 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1887
1888 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1889 if (RT_SUCCESS(rc))
1890 {
1891 /*
1892 * Copy all guest read/write VMCS fields.
1893 *
1894 * We don't check for VMWRITE failures here for performance reasons and
1895 * because they are not expected to fail, barring irrecoverable conditions
1896 * like hardware errors.
1897 */
1898 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1899 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1900 {
1901 uint64_t u64Val;
1902 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1903 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1904 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1905 }
1906
1907 /*
1908 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1909 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1910 */
1911 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1912 {
1913 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1914 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1915 {
1916 uint64_t u64Val;
1917 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1918 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1919 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1920 }
1921 }
1922
1923 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1924 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1925 }
1926
1927 ASMSetFlags(fEFlags);
1928 return rc;
1929}
1930
1931
1932/**
1933 * Copies the shadow VMCS to the nested-guest VMCS.
1934 *
1935 * @returns VBox status code.
1936 * @param pVCpu The cross context virtual CPU structure.
1937 * @param pVmcsInfo The VMCS info. object.
1938 *
1939 * @remarks Called with interrupts disabled.
1940 */
1941static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1942{
1943 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1944 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1945 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1946
1947 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1948 if (RT_SUCCESS(rc))
1949 {
1950 /*
1951 * Copy guest read/write fields from the shadow VMCS.
1952 * Guest read-only fields cannot be modified, so no need to copy them.
1953 *
1954 * We don't check for VMREAD failures here for performance reasons and
1955 * because they are not expected to fail, barring irrecoverable conditions
1956 * like hardware errors.
1957 */
1958 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1959 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1960 {
1961 uint64_t u64Val;
1962 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1963 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1964 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1965 }
1966
1967 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1968 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1969 }
1970 return rc;
1971}
1972
1973
1974/**
1975 * Enables VMCS shadowing for the given VMCS info. object.
1976 *
1977 * @param pVCpu The cross context virtual CPU structure.
1978 * @param pVmcsInfo The VMCS info. object.
1979 *
1980 * @remarks No-long-jump zone!!!
1981 */
1982static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1983{
1984 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1985 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1986 {
1987 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1988 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1989 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1990 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1991 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1992 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
1993 Log4Func(("Enabled\n"));
1994 }
1995}
1996
1997
1998/**
1999 * Disables VMCS shadowing for the given VMCS info. object.
2000 *
2001 * @param pVCpu The cross context virtual CPU structure.
2002 * @param pVmcsInfo The VMCS info. object.
2003 *
2004 * @remarks No-long-jump zone!!!
2005 */
2006static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2007{
2008 /*
2009 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
2010 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
2011 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
2012 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
2013 *
2014 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2015 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2016 */
2017 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2018 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2019 {
2020 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2021 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2022 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2023 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2024 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2025 Log4Func(("Disabled\n"));
2026 }
2027}
2028#endif
2029
2030
2031/**
2032 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2033 *
2034 * The guest FPU state is always pre-loaded, hence we don't need to bother with
2035 * sharing FPU-related CR0 bits between the guest and host.
2036 *
2037 * @returns VBox status code.
2038 * @param pVCpu The cross context virtual CPU structure.
2039 * @param pVmxTransient The VMX-transient structure.
2040 *
2041 * @remarks No-long-jump zone!!!
2042 */
2043static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2044{
2045 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2046 {
2047 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2048 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2049
2050 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2051 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2052 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2053 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2054 else
2055 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
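/*
 * Typical capability values (not guaranteed on all CPUs): Cr0Fixed0=0x80000021,
 * i.e. PE, NE and PG must be 1, and Cr0Fixed1=0xffffffff, i.e. nothing is forced
 * to 0. With unrestricted guest execution we drop PE and PG from the must-be-one
 * mask so the guest can run real-mode and non-paged protected-mode code directly.
 */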
2056
2057 if (!pVmxTransient->fIsNestedGuest)
2058 {
2059 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2060 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2061 uint64_t const u64ShadowCr0 = u64GuestCr0;
2062 Assert(!RT_HI_U32(u64GuestCr0));
2063
2064 /*
2065 * Setup VT-x's view of the guest CR0.
2066 */
2067 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2068 if (VM_IS_VMX_NESTED_PAGING(pVM))
2069 {
2070#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2071 if (CPUMIsGuestPagingEnabled(pVCpu))
2072 {
2073 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2074 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2075 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2076 }
2077 else
2078 {
2079 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2080 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2081 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2082 }
2083
2084 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2085 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2086 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2087#endif
2088 }
2089 else
2090 {
2091 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2092 u64GuestCr0 |= X86_CR0_WP;
2093 }
2094
2095 /*
2096 * Guest FPU bits.
2097 *
2098 * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state
2099 * using CR0.TS.
2100 *
2101 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2102 * set on the first CPUs to support VT-x, and there is no mention of relaxing this for UX in the VM-entry checks.
2103 */
2104 u64GuestCr0 |= X86_CR0_NE;
2105
2106 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2107 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2108
2109 /*
2110 * Update exception intercepts.
2111 */
2112 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2113#ifndef IN_NEM_DARWIN
2114 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2115 {
2116 Assert(PDMVmmDevHeapIsEnabled(pVM));
2117 Assert(pVM->hm.s.vmx.pRealModeTSS);
2118 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2119 }
2120 else
2121#endif
2122 {
2123 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2124 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2125 if (fInterceptMF)
2126 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2127 }
2128
2129 /* Additional intercepts for debugging, define these yourself explicitly. */
2130#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2131 uXcptBitmap |= 0
2132 | RT_BIT(X86_XCPT_BP)
2133 | RT_BIT(X86_XCPT_DE)
2134 | RT_BIT(X86_XCPT_NM)
2135 | RT_BIT(X86_XCPT_TS)
2136 | RT_BIT(X86_XCPT_UD)
2137 | RT_BIT(X86_XCPT_NP)
2138 | RT_BIT(X86_XCPT_SS)
2139 | RT_BIT(X86_XCPT_GP)
2140 | RT_BIT(X86_XCPT_PF)
2141 | RT_BIT(X86_XCPT_MF)
2142 ;
2143#elif defined(HMVMX_ALWAYS_TRAP_PF)
2144 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2145#endif
2146 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2147 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2148 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2149 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2150 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2151
2152 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2153 u64GuestCr0 |= fSetCr0;
2154 u64GuestCr0 &= fZapCr0;
2155 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2156
2157 /* Commit the CR0 and related fields to the guest VMCS. */
2158 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2159 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2160 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2161 {
2162 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2163 AssertRC(rc);
2164 }
2165 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2166 {
2167 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2168 AssertRC(rc);
2169 }
2170
2171 /* Update our caches. */
2172 pVmcsInfo->u32ProcCtls = uProcCtls;
2173 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2174
2175 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2176 }
2177 else
2178 {
2179 /*
2180 * With nested-guests, we may have extended the guest/host mask here since we
2181 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2182 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2183 * originally supplied. We must copy those bits from the nested-guest CR0 into
2184 * the nested-guest CR0 read-shadow.
2185 */
2186 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2187 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2188 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2189 Assert(!RT_HI_U32(u64GuestCr0));
2190 Assert(u64GuestCr0 & X86_CR0_NE);
2191
2192 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2193 u64GuestCr0 |= fSetCr0;
2194 u64GuestCr0 &= fZapCr0;
2195 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2196
2197 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2198 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2199 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2200
2201 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2202 }
2203
2204 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2205 }
2206
2207 return VINF_SUCCESS;
2208}
2209
2210
2211/**
2212 * Exports the guest control registers (CR3, CR4) into the guest-state area
2213 * in the VMCS.
2214 *
2215 * @returns VBox strict status code.
2216 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2217 * without unrestricted guest access and the VMMDev is not presently
2218 * mapped (e.g. EFI32).
2219 *
2220 * @param pVCpu The cross context virtual CPU structure.
2221 * @param pVmxTransient The VMX-transient structure.
2222 *
2223 * @remarks No-long-jump zone!!!
2224 */
2225static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2226{
2227 int rc = VINF_SUCCESS;
2228 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2229
2230 /*
2231 * Guest CR2.
2232 * It's always loaded in the assembler code. Nothing to do here.
2233 */
2234
2235 /*
2236 * Guest CR3.
2237 */
2238 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2239 {
2240 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2241
2242 if (VM_IS_VMX_NESTED_PAGING(pVM))
2243 {
2244#ifndef IN_NEM_DARWIN
2245 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2246 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2247
2248 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2249 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2250 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2251 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2252
2253 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2254 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2255 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
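/* Resulting layout, e.g. with a (hypothetical) EPT PML4 table at 0x12345000:
   bits 2:0 = 6 (WB memory type), bits 5:3 = 3 (page-walk length 4), giving
   EPTP = 0x1234501e. */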
2256
2257 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2258 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2259 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2260 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2261 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2262 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2263 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2264
2265 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2266 AssertRC(rc);
2267#endif
2268
2269 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2270 uint64_t u64GuestCr3 = pCtx->cr3;
2271 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2272 || CPUMIsGuestPagingEnabledEx(pCtx))
2273 {
2274 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2275 if (CPUMIsGuestInPAEModeEx(pCtx))
2276 {
2277 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2278 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2279 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2280 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2281 }
2282
2283 /*
2284 * With nested paging the guest's view of its CR3 is left untouched: either the
2285 * guest is using paging, or unrestricted guest execution handles the guest while
2286 * it is not using paging.
2287 */
2288 }
2289#ifndef IN_NEM_DARWIN
2290 else
2291 {
2292 /*
2293 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2294 * thinks it accesses physical memory directly, we use our identity-mapped
2295 * page table to map guest-linear to guest-physical addresses. EPT takes care
2296 * of translating it to host-physical addresses.
2297 */
2298 RTGCPHYS GCPhys;
2299 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2300
2301 /* We obtain it here every time as the guest could have relocated this PCI region. */
2302 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2303 if (RT_SUCCESS(rc))
2304 { /* likely */ }
2305 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2306 {
2307 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2308 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2309 }
2310 else
2311 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2312
2313 u64GuestCr3 = GCPhys;
2314 }
2315#endif
2316
2317 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2318 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2319 AssertRC(rc);
2320 }
2321 else
2322 {
2323 Assert(!pVmxTransient->fIsNestedGuest);
2324 /* Non-nested paging case, just use the hypervisor's CR3. */
2325 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2326
2327 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2328 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2329 AssertRC(rc);
2330 }
2331
2332 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2333 }
2334
2335 /*
2336 * Guest CR4.
2337 * ASSUMES this is done every time we get in from ring-3! (XCR0)
2338 */
2339 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2340 {
2341 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2342 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2343
2344 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2345 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
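/* Typically (not guaranteed) Cr4Fixed0 is 0x2000, i.e. only CR4.VMXE is forced to 1;
   this is why the "apply fixed bits" step below mentions mainly CR4.VMXE. */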
2346
2347 /*
2348 * With nested-guests, we may have extended the guest/host mask here (since we
2349 * merged in the outer guest's mask, see vmxHCMergeVmcsNested). This means, the
2350 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2351 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2352 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2353 */
2354 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2355 uint64_t u64GuestCr4 = pCtx->cr4;
2356 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2357 ? pCtx->cr4
2358 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2359 Assert(!RT_HI_U32(u64GuestCr4));
2360
2361#ifndef IN_NEM_DARWIN
2362 /*
2363 * Setup VT-x's view of the guest CR4.
2364 *
2365 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2366 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2367 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2368 *
2369 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2370 */
2371 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2372 {
2373 Assert(pVM->hm.s.vmx.pRealModeTSS);
2374 Assert(PDMVmmDevHeapIsEnabled(pVM));
2375 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2376 }
2377#endif
2378
2379 if (VM_IS_VMX_NESTED_PAGING(pVM))
2380 {
2381 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2382 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2383 {
2384 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2385 u64GuestCr4 |= X86_CR4_PSE;
2386 /* Our identity mapping is a 32-bit page directory. */
2387 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2388 }
2389 /* else use guest CR4.*/
2390 }
2391 else
2392 {
2393 Assert(!pVmxTransient->fIsNestedGuest);
2394
2395 /*
2396 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2397 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2398 */
2399 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2400 {
2401 case PGMMODE_REAL: /* Real-mode. */
2402 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2403 case PGMMODE_32_BIT: /* 32-bit paging. */
2404 {
2405 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2406 break;
2407 }
2408
2409 case PGMMODE_PAE: /* PAE paging. */
2410 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2411 {
2412 u64GuestCr4 |= X86_CR4_PAE;
2413 break;
2414 }
2415
2416 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2417 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2418 {
2419#ifdef VBOX_WITH_64_BITS_GUESTS
2420 /* For our assumption in vmxHCShouldSwapEferMsr. */
2421 Assert(u64GuestCr4 & X86_CR4_PAE);
2422 break;
2423#endif
2424 }
2425 default:
2426 AssertFailed();
2427 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2428 }
2429 }
2430
2431 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2432 u64GuestCr4 |= fSetCr4;
2433 u64GuestCr4 &= fZapCr4;
2434
2435 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2436 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2437 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2438
2439#ifndef IN_NEM_DARWIN
2440 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2441 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2442 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2443 {
2444 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2445 hmR0VmxUpdateStartVmFunction(pVCpu);
2446 }
2447#endif
2448
2449 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2450
2451 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2452 }
2453 return rc;
2454}
2455
2456
2457#ifdef VBOX_STRICT
2458/**
2459 * Strict function to validate segment registers.
2460 *
2461 * @param pVCpu The cross context virtual CPU structure.
2462 * @param pVmcsInfo The VMCS info. object.
2463 *
2464 * @remarks Will import guest CR0 on strict builds during validation of
2465 * segments.
2466 */
2467static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2468{
2469 /*
2470 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2471 *
2472 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2473 * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2474 * unusable bit and doesn't change the guest-context value.
2475 */
2476 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2477 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2478 vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2479 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2480 && ( !CPUMIsGuestInRealModeEx(pCtx)
2481 && !CPUMIsGuestInV86ModeEx(pCtx)))
2482 {
2483 /* Protected mode checks */
2484 /* CS */
2485 Assert(pCtx->cs.Attr.n.u1Present);
2486 Assert(!(pCtx->cs.Attr.u & 0xf00));
2487 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2488 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2489 || !(pCtx->cs.Attr.n.u1Granularity));
2490 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2491 || (pCtx->cs.Attr.n.u1Granularity));
2492 /* CS cannot be loaded with NULL in protected mode. */
2493 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2494 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2495 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2496 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2497 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2498 else
2499 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2500 /* SS */
2501 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2502 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2503 if ( !(pCtx->cr0 & X86_CR0_PE)
2504 || pCtx->cs.Attr.n.u4Type == 3)
2505 {
2506 Assert(!pCtx->ss.Attr.n.u2Dpl);
2507 }
2508 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2509 {
2510 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2511 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2512 Assert(pCtx->ss.Attr.n.u1Present);
2513 Assert(!(pCtx->ss.Attr.u & 0xf00));
2514 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2515 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2516 || !(pCtx->ss.Attr.n.u1Granularity));
2517 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2518 || (pCtx->ss.Attr.n.u1Granularity));
2519 }
2520 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2521 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2522 {
2523 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2524 Assert(pCtx->ds.Attr.n.u1Present);
2525 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2526 Assert(!(pCtx->ds.Attr.u & 0xf00));
2527 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2528 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2529 || !(pCtx->ds.Attr.n.u1Granularity));
2530 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2531 || (pCtx->ds.Attr.n.u1Granularity));
2532 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2533 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2534 }
2535 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2536 {
2537 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2538 Assert(pCtx->es.Attr.n.u1Present);
2539 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2540 Assert(!(pCtx->es.Attr.u & 0xf00));
2541 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2542 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2543 || !(pCtx->es.Attr.n.u1Granularity));
2544 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2545 || (pCtx->es.Attr.n.u1Granularity));
2546 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2547 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2548 }
2549 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2550 {
2551 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2552 Assert(pCtx->fs.Attr.n.u1Present);
2553 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2554 Assert(!(pCtx->fs.Attr.u & 0xf00));
2555 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2556 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2557 || !(pCtx->fs.Attr.n.u1Granularity));
2558 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2559 || (pCtx->fs.Attr.n.u1Granularity));
2560 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2561 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2562 }
2563 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2564 {
2565 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2566 Assert(pCtx->gs.Attr.n.u1Present);
2567 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2568 Assert(!(pCtx->gs.Attr.u & 0xf00));
2569 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2570 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2571 || !(pCtx->gs.Attr.n.u1Granularity));
2572 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2573 || (pCtx->gs.Attr.n.u1Granularity));
2574 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2575 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2576 }
2577 /* 64-bit capable CPUs. */
2578 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2579 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2580 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2581 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2582 }
2583 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2584 || ( CPUMIsGuestInRealModeEx(pCtx)
2585 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2586 {
2587 /* Real and v86 mode checks. */
2588 /* vmxHCExportGuestSegReg() writes the modified attributes into the VMCS. We want what we're feeding to VT-x. */
2589 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2590#ifndef IN_NEM_DARWIN
2591 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2592 {
2593 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2594 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2595 }
2596 else
2597#endif
2598 {
2599 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2600 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2601 }
2602
2603 /* CS */
2604 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#RX64 %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2605 Assert(pCtx->cs.u32Limit == 0xffff);
2606 Assert(u32CSAttr == 0xf3);
2607 /* SS */
2608 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2609 Assert(pCtx->ss.u32Limit == 0xffff);
2610 Assert(u32SSAttr == 0xf3);
2611 /* DS */
2612 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2613 Assert(pCtx->ds.u32Limit == 0xffff);
2614 Assert(u32DSAttr == 0xf3);
2615 /* ES */
2616 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2617 Assert(pCtx->es.u32Limit == 0xffff);
2618 Assert(u32ESAttr == 0xf3);
2619 /* FS */
2620 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2621 Assert(pCtx->fs.u32Limit == 0xffff);
2622 Assert(u32FSAttr == 0xf3);
2623 /* GS */
2624 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2625 Assert(pCtx->gs.u32Limit == 0xffff);
2626 Assert(u32GSAttr == 0xf3);
2627 /* 64-bit capable CPUs. */
2628 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2629 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2630 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2631 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2632 }
2633}
2634#endif /* VBOX_STRICT */
2635
2636
2637/**
2638 * Exports a guest segment register into the guest-state area in the VMCS.
2639 *
2640 * @returns VBox status code.
2641 * @param pVCpu The cross context virtual CPU structure.
2642 * @param pVmcsInfo The VMCS info. object.
2643 * @param iSegReg The segment register number (X86_SREG_XXX).
2644 * @param pSelReg Pointer to the segment selector.
2645 *
2646 * @remarks No-long-jump zone!!!
2647 */
2648static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2649{
2650 Assert(iSegReg < X86_SREG_COUNT);
2651
2652 uint32_t u32Access = pSelReg->Attr.u;
2653#ifndef IN_NEM_DARWIN
2654 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2655#endif
2656 {
2657 /*
2658 * The way to differentiate between whether this is really a null selector or was just
2659 * a selector loaded with 0 in real-mode is using the segment attributes. A selector
2660 * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
2661 * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure
2662 * that NULL selectors loaded in protected-mode have their attributes set to 0.
2663 */
2664 if (u32Access)
2665 { }
2666 else
2667 u32Access = X86DESCATTR_UNUSABLE;
2668 }
2669#ifndef IN_NEM_DARWIN
2670 else
2671 {
2672 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2673 u32Access = 0xf3;
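/* 0xf3 decodes as type 3 (accessed read/write data), S=1, DPL=3, present - the
   access rights VT-x expects for every segment while in virtual-8086 mode. */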
2674 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2675 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2676 RT_NOREF_PV(pVCpu);
2677 }
2678#else
2679 RT_NOREF(pVmcsInfo);
2680#endif
2681
2682 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
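/* Note: "ESCSSSDSFSGS" + iSegReg * 2 together with the "%.2s" format picks out the
   two-letter register name (ES, CS, SS, DS, FS, GS) for the X86_SREG_XXX index. */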
2683 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2684 ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2685
2686 /*
2687 * Commit it to the VMCS.
2688 */
2689 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
2690 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
2691 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
2692 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
2693 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2694 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2695 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2696 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2697 return VINF_SUCCESS;
2698}
2699
2700
2701/**
2702 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2703 * area in the VMCS.
2704 *
2705 * @returns VBox status code.
2706 * @param pVCpu The cross context virtual CPU structure.
2707 * @param pVmxTransient The VMX-transient structure.
2708 *
2709 * @remarks Will import guest CR0 on strict builds during validation of
2710 * segments.
2711 * @remarks No-long-jump zone!!!
2712 */
2713static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2714{
2715 int rc = VERR_INTERNAL_ERROR_5;
2716#ifndef IN_NEM_DARWIN
2717 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2718#endif
2719 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2720 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2721#ifndef IN_NEM_DARWIN
2722 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2723#endif
2724
2725 /*
2726 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2727 */
2728 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2729 {
2730 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2731 {
2732 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2733#ifndef IN_NEM_DARWIN
2734 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2735 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2736#endif
2737 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2738 AssertRC(rc);
2739 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2740 }
2741
2742 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2743 {
2744 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2745#ifndef IN_NEM_DARWIN
2746 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2747 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2748#endif
2749 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2750 AssertRC(rc);
2751 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2752 }
2753
2754 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2755 {
2756 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2757#ifndef IN_NEM_DARWIN
2758 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2759 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2760#endif
2761 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2762 AssertRC(rc);
2763 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2764 }
2765
2766 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2767 {
2768 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2769#ifndef IN_NEM_DARWIN
2770 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2771 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2772#endif
2773 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2774 AssertRC(rc);
2775 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2776 }
2777
2778 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2779 {
2780 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2781#ifndef IN_NEM_DARWIN
2782 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2783 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2784#endif
2785 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2786 AssertRC(rc);
2787 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2788 }
2789
2790 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2791 {
2792 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2793#ifndef IN_NEM_DARWIN
2794 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2795 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2796#endif
2797 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2798 AssertRC(rc);
2799 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2800 }
2801
2802#ifdef VBOX_STRICT
2803 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2804#endif
2805 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2806 pCtx->cs.Attr.u));
2807 }
2808
2809 /*
2810 * Guest TR.
2811 */
2812 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2813 {
2814 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2815
2816 /*
2817 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2818 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2819 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2820 */
2821 uint16_t u16Sel;
2822 uint32_t u32Limit;
2823 uint64_t u64Base;
2824 uint32_t u32AccessRights;
2825#ifndef IN_NEM_DARWIN
2826 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2827#endif
2828 {
2829 u16Sel = pCtx->tr.Sel;
2830 u32Limit = pCtx->tr.u32Limit;
2831 u64Base = pCtx->tr.u64Base;
2832 u32AccessRights = pCtx->tr.Attr.u;
2833 }
2834#ifndef IN_NEM_DARWIN
2835 else
2836 {
2837 Assert(!pVmxTransient->fIsNestedGuest);
2838 Assert(pVM->hm.s.vmx.pRealModeTSS);
2839 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2840
2841 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2842 RTGCPHYS GCPhys;
2843 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2844 AssertRCReturn(rc, rc);
2845
2846 X86DESCATTR DescAttr;
2847 DescAttr.u = 0;
2848 DescAttr.n.u1Present = 1;
2849 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2850
2851 u16Sel = 0;
2852 u32Limit = HM_VTX_TSS_SIZE;
2853 u64Base = GCPhys;
2854 u32AccessRights = DescAttr.u;
2855 }
2856#endif
2857
2858 /* Validate. */
2859 Assert(!(u16Sel & RT_BIT(2)));
2860 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2861 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2862 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2863 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2864 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2865 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2866 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2867 Assert( (u32Limit & 0xfff) == 0xfff
2868 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2869 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2870 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2871
2872 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2873 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2874 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2875 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2876
2877 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2878 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2879 }
2880
2881 /*
2882 * Guest GDTR.
2883 */
2884 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2885 {
2886 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2887
2888 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2889 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2890
2891 /* Validate. */
2892 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2893
2894 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2895 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2896 }
2897
2898 /*
2899 * Guest LDTR.
2900 */
2901 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2902 {
2903 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2904
2905 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2906 uint32_t u32Access;
2907 if ( !pVmxTransient->fIsNestedGuest
2908 && !pCtx->ldtr.Attr.u)
2909 u32Access = X86DESCATTR_UNUSABLE;
2910 else
2911 u32Access = pCtx->ldtr.Attr.u;
2912
2913 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2914 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2915 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2916 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2917
2918 /* Validate. */
2919 if (!(u32Access & X86DESCATTR_UNUSABLE))
2920 {
2921 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2922 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2923 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2924 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2925 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2926 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2927 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2928 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2929 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2930 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2931 }
2932
2933 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2934 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2935 }
2936
2937 /*
2938 * Guest IDTR.
2939 */
2940 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2941 {
2942 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2943
2944 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2945 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2946
2947 /* Validate. */
2948 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2949
2950 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2951 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2952 }
2953
2954 return VINF_SUCCESS;
2955}
2956
2957
2958/**
2959 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2960 * VM-exit interruption info type.
2961 *
2962 * @returns The IEM exception flags.
2963 * @param uVector The event vector.
2964 * @param uVmxEventType The VMX event type.
2965 *
2966 * @remarks This function currently only constructs flags required for
2967 *          IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2968 * and CR2 aspects of an exception are not included).
2969 */
2970static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2971{
2972 uint32_t fIemXcptFlags;
2973 switch (uVmxEventType)
2974 {
2975 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2976 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2977 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2978 break;
2979
2980 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2981 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2982 break;
2983
2984 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2985 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2986 break;
2987
2988 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2989 {
2990 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2991 if (uVector == X86_XCPT_BP)
2992 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2993 else if (uVector == X86_XCPT_OF)
2994 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
2995 else
2996 {
2997 fIemXcptFlags = 0;
2998 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
2999 }
3000 break;
3001 }
3002
3003 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
3004 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3005 break;
3006
3007 default:
3008 fIemXcptFlags = 0;
3009 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
3010 break;
3011 }
3012 return fIemXcptFlags;
3013}
3014
3015
3016/**
3017 * Sets an event as a pending event to be injected into the guest.
3018 *
3019 * @param pVCpu The cross context virtual CPU structure.
3020 * @param u32IntInfo The VM-entry interruption-information field.
3021 * @param cbInstr The VM-entry instruction length in bytes (for
3022 * software interrupts, exceptions and privileged
3023 * software exceptions).
3024 * @param u32ErrCode The VM-entry exception error code.
3025 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3026 * page-fault.
3027 */
3028DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3029 RTGCUINTPTR GCPtrFaultAddress)
3030{
3031 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3032 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3033 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3034 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3035 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3036 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3037}
3038
3039
3040/**
3041 * Sets an external interrupt as pending-for-injection into the VM.
3042 *
3043 * @param pVCpu The cross context virtual CPU structure.
3044 * @param u8Interrupt The external interrupt vector.
3045 */
3046DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3047{
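        /* Note: the vector occupies bits 7:0 in both the VM-entry and VM-exit interruption-info
           formats, so the EXIT vector macro below yields the same encoding as the ENTRY one. */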
3048 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3049 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3050 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3051 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3052 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3053}
3054
3055
3056/**
3057 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3058 *
3059 * @param pVCpu The cross context virtual CPU structure.
3060 */
3061DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3062{
3063 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3064 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3065 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3066 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3067 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3068}
3069
3070
3071/**
3072 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3073 *
3074 * @param pVCpu The cross context virtual CPU structure.
3075 */
3076DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3077{
3078 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3079 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3080 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3081 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3082 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3083}
3084
3085
3086/**
3087 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3088 *
3089 * @param pVCpu The cross context virtual CPU structure.
3090 */
3091DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3092{
3093 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3094 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3095 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3096 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3097 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3098}
3099
3100
3101/**
3102 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3103 *
3104 * @param pVCpu The cross context virtual CPU structure.
3105 */
3106DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3107{
3108 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3109 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3110 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3111 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3112 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3113}
3114
3115
3116#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3117/**
3118 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3119 *
3120 * @param pVCpu The cross context virtual CPU structure.
3121 * @param u32ErrCode The error code for the general-protection exception.
3122 */
3123DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3124{
3125 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3126 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3127 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3128 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3129 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3130}
3131
3132
3133/**
3134 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3135 *
3136 * @param pVCpu The cross context virtual CPU structure.
3137 * @param u32ErrCode The error code for the stack exception.
3138 */
3139DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3140{
3141 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3142 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3143 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3144 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3145 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3146}
3147#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3148
3149
3150/**
3151 * Fixes up attributes for the specified segment register.
3152 *
3153 * @param pVCpu The cross context virtual CPU structure.
3154 * @param pSelReg The segment register that needs fixing.
3155 * @param pszRegName The register name (for logging and assertions).
3156 */
3157static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3158{
3159 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3160
3161 /*
3162 * If VT-x marks the segment as unusable, most other bits remain undefined:
3163 * - For CS the L, D and G bits have meaning.
3164 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3165 * - For the remaining data segments no bits are defined.
3166 *
3167     * The present bit and the unusable bit have been observed to be set at the
3168 * same time (the selector was supposed to be invalid as we started executing
3169 * a V8086 interrupt in ring-0).
3170 *
3171 * What should be important for the rest of the VBox code, is that the P bit is
3172 * cleared. Some of the other VBox code recognizes the unusable bit, but
3173     * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3174 * safe side here, we'll strip off P and other bits we don't care about. If
3175 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3176 *
3177 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3178 */
3179#ifdef VBOX_STRICT
3180 uint32_t const uAttr = pSelReg->Attr.u;
3181#endif
3182
3183 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3184 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3185 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3186
3187#ifdef VBOX_STRICT
3188# ifndef IN_NEM_DARWIN
3189 VMMRZCallRing3Disable(pVCpu);
3190# endif
3191 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3192# ifdef DEBUG_bird
3193 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3194 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3195 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3196# endif
3197# ifndef IN_NEM_DARWIN
3198 VMMRZCallRing3Enable(pVCpu);
3199# endif
3200 NOREF(uAttr);
3201#endif
3202 RT_NOREF2(pVCpu, pszRegName);
3203}
3204
3205
3206/**
3207 * Imports a guest segment register from the current VMCS into the guest-CPU
3208 * context.
3209 *
3210 * @param pVCpu The cross context virtual CPU structure.
3211 * @param iSegReg The segment register number (X86_SREG_XXX).
3212 *
3213 * @remarks Called with interrupts and/or preemption disabled.
3214 */
3215static void vmxHCImportGuestSegReg(PVMCPUCC pVCpu, uint32_t iSegReg)
3216{
3217 Assert(iSegReg < X86_SREG_COUNT);
3218 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
3219 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
3220 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
3221 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
3222
3223 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
3224
3225 uint16_t u16Sel;
3226 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), &u16Sel); AssertRC(rc);
3227 pSelReg->Sel = u16Sel;
3228 pSelReg->ValidSel = u16Sel;
3229
3230 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3231 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), &pSelReg->u64Base); AssertRC(rc);
3232
3233 uint32_t u32Attr;
3234 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), &u32Attr); AssertRC(rc);
3235 pSelReg->Attr.u = u32Attr;
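        /* The string literal below packs "ES\0CS\0SS\0DS\0FS\0GS", so iSegReg * 3 selects the
           matching register name for logging when an unusable segment gets sanitized. */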
3236 if (u32Attr & X86DESCATTR_UNUSABLE)
3237 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + iSegReg * 3);
3238
3239 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3240}
3241
3242
3243/**
3244 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3245 *
3246 * @param pVCpu The cross context virtual CPU structure.
3247 *
3248 * @remarks Called with interrupts and/or preemption disabled.
3249 */
3250static void vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3251{
3252 uint16_t u16Sel;
3253 uint64_t u64Base;
3254 uint32_t u32Limit, u32Attr;
3255 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3256 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3257 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3258 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3259
3260 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3261 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3262 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3263 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3264 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3265 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3266 if (u32Attr & X86DESCATTR_UNUSABLE)
3267 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3268}
3269
3270
3271/**
3272 * Imports the guest TR from the current VMCS into the guest-CPU context.
3273 *
3274 * @param pVCpu The cross context virtual CPU structure.
3275 *
3276 * @remarks Called with interrupts and/or preemption disabled.
3277 */
3278static void vmxHCImportGuestTr(PVMCPUCC pVCpu)
3279{
3280 uint16_t u16Sel;
3281 uint64_t u64Base;
3282 uint32_t u32Limit, u32Attr;
3283 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3284 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3285 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3286 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3287
3288 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3289 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3290 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3291 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3292 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3293 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3294 /* TR is the only selector that can never be unusable. */
3295 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3296}
3297
3298
3299/**
3300 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3301 *
3302 * @param pVCpu The cross context virtual CPU structure.
3303 *
3304 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3305 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3306 * instead!!!
3307 */
3308static void vmxHCImportGuestRip(PVMCPUCC pVCpu)
3309{
3310 uint64_t u64Val;
3311 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3312 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
3313 {
3314 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3315 AssertRC(rc);
3316
3317 pCtx->rip = u64Val;
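            /* Record the raw (unflattened) RIP in the exit history; the flattened PC (CS base + RIP)
               is recorded separately when CS is imported, see vmxHCImportGuestState(). */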
3318 EMHistoryUpdatePC(pVCpu, pCtx->rip, false);
3319 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
3320 }
3321}
3322
3323
3324/**
3325 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3326 *
3327 * @param pVCpu The cross context virtual CPU structure.
3328 * @param pVmcsInfo The VMCS info. object.
3329 *
3330 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3331 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3332 * instead!!!
3333 */
3334static void vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3335{
3336 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3337 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
3338 {
3339 uint64_t u64Val;
3340 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
3341 AssertRC(rc);
3342
3343 pCtx->rflags.u64 = u64Val;
3344#ifndef IN_NEM_DARWIN
3345 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3346 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
3347 {
3348 pCtx->eflags.Bits.u1VM = 0;
3349 pCtx->eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3350 }
3351#else
3352 RT_NOREF(pVmcsInfo);
3353#endif
3354 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3355 }
3356}
3357
3358
3359/**
3360 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3361 * context.
3362 *
3363 * @param pVCpu The cross context virtual CPU structure.
3364 * @param pVmcsInfo The VMCS info. object.
3365 *
3366 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3367 * do not log!
3368 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3369 * instead!!!
3370 */
3371static void vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3372{
3373 uint32_t u32Val;
3374 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
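        /* If no interruptibility bits are set, clear any stale interrupt inhibition and NMI blocking;
           otherwise derive the inhibition and NMI-blocking state from the VMCS value below. */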
3375 if (!u32Val)
3376 {
3377 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3378 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3379 CPUMSetGuestNmiBlocking(pVCpu, false);
3380 }
3381 else
3382 {
3383 /*
3384 * We must import RIP here to set our EM interrupt-inhibited state.
3385 * We also import RFLAGS as our code that evaluates pending interrupts
3386 * before VM-entry requires it.
3387 */
3388 vmxHCImportGuestRip(pVCpu);
3389 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3390
3391 if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3392 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
3393 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3394 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3395
3396 bool const fNmiBlocking = RT_BOOL(u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
3397 CPUMSetGuestNmiBlocking(pVCpu, fNmiBlocking);
3398 }
3399}
3400
3401
3402/**
3403 * Worker for VMXR0ImportStateOnDemand.
3404 *
3405 * @returns VBox status code.
3406 * @param pVCpu The cross context virtual CPU structure.
3407 * @param pVmcsInfo The VMCS info. object.
3408 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3409 */
3410static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3411{
3412 int rc = VINF_SUCCESS;
3413 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3414 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3415 uint32_t u32Val;
3416
3417 /*
3418     * Note! This is a hack to work around a mysterious BSOD observed with release builds
3419 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3420 * neither are other host platforms.
3421 *
3422 * Committing this temporarily as it prevents BSOD.
3423 *
3424 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3425 */
3426# ifdef RT_OS_WINDOWS
3427 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3428 return VERR_HM_IPE_1;
3429# endif
3430
3431 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3432
3433#ifndef IN_NEM_DARWIN
3434 /*
3435 * We disable interrupts to make the updating of the state and in particular
3436     * the fExtrn modification atomic with respect to preemption hooks.
3437 */
3438 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3439#endif
3440
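        /* Only import state that is still marked as external, i.e. not yet pulled out of the VMCS. */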
3441 fWhat &= pCtx->fExtrn;
3442 if (fWhat)
3443 {
3444 do
3445 {
3446 if (fWhat & CPUMCTX_EXTRN_RIP)
3447 vmxHCImportGuestRip(pVCpu);
3448
3449 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3450 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3451
3452 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3453 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3454
3455 if (fWhat & CPUMCTX_EXTRN_RSP)
3456 {
3457 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3458 AssertRC(rc);
3459 }
3460
3461 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3462 {
3463 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3464#ifndef IN_NEM_DARWIN
3465 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3466#else
3467 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3468#endif
3469 if (fWhat & CPUMCTX_EXTRN_CS)
3470 {
3471 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
3472 vmxHCImportGuestRip(pVCpu);
3473 if (fRealOnV86Active)
3474 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3475 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3476 }
3477 if (fWhat & CPUMCTX_EXTRN_SS)
3478 {
3479 vmxHCImportGuestSegReg(pVCpu, X86_SREG_SS);
3480 if (fRealOnV86Active)
3481 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3482 }
3483 if (fWhat & CPUMCTX_EXTRN_DS)
3484 {
3485 vmxHCImportGuestSegReg(pVCpu, X86_SREG_DS);
3486 if (fRealOnV86Active)
3487 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3488 }
3489 if (fWhat & CPUMCTX_EXTRN_ES)
3490 {
3491 vmxHCImportGuestSegReg(pVCpu, X86_SREG_ES);
3492 if (fRealOnV86Active)
3493 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3494 }
3495 if (fWhat & CPUMCTX_EXTRN_FS)
3496 {
3497 vmxHCImportGuestSegReg(pVCpu, X86_SREG_FS);
3498 if (fRealOnV86Active)
3499 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3500 }
3501 if (fWhat & CPUMCTX_EXTRN_GS)
3502 {
3503 vmxHCImportGuestSegReg(pVCpu, X86_SREG_GS);
3504 if (fRealOnV86Active)
3505 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3506 }
3507 }
3508
3509 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3510 {
3511 if (fWhat & CPUMCTX_EXTRN_LDTR)
3512 vmxHCImportGuestLdtr(pVCpu);
3513
3514 if (fWhat & CPUMCTX_EXTRN_GDTR)
3515 {
3516 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3517 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3518 pCtx->gdtr.cbGdt = u32Val;
3519 }
3520
3521 /* Guest IDTR. */
3522 if (fWhat & CPUMCTX_EXTRN_IDTR)
3523 {
3524 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3525 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3526 pCtx->idtr.cbIdt = u32Val;
3527 }
3528
3529 /* Guest TR. */
3530 if (fWhat & CPUMCTX_EXTRN_TR)
3531 {
3532#ifndef IN_NEM_DARWIN
3533 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3534                   so we don't need to import that one. */
3535 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3536#endif
3537 vmxHCImportGuestTr(pVCpu);
3538 }
3539 }
3540
3541 if (fWhat & CPUMCTX_EXTRN_DR7)
3542 {
3543#ifndef IN_NEM_DARWIN
3544 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3545#endif
3546 {
3547 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3548 AssertRC(rc);
3549 }
3550 }
3551
3552 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3553 {
3554 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3555 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3556 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3557 pCtx->SysEnter.cs = u32Val;
3558 }
3559
3560#ifndef IN_NEM_DARWIN
3561 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3562 {
3563 if ( pVM->hmr0.s.fAllow64BitGuests
3564 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3565 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3566 }
3567
3568 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3569 {
3570 if ( pVM->hmr0.s.fAllow64BitGuests
3571 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3572 {
3573 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3574 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3575 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3576 }
3577 }
3578
3579 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3580 {
3581 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3582 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3583 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3584 Assert(pMsrs);
3585 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3586 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
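                /* Walk the VM-exit MSR-store area and copy each saved guest MSR back into the
                   guest-CPU context (LBR MSRs go into the shared VMCS-info arrays); any
                   unexpected MSR is treated as a fatal error. */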
3587 for (uint32_t i = 0; i < cMsrs; i++)
3588 {
3589 uint32_t const idMsr = pMsrs[i].u32Msr;
3590 switch (idMsr)
3591 {
3592 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3593 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3594 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3595 default:
3596 {
3597 uint32_t idxLbrMsr;
3598 if (VM_IS_VMX_LBR(pVM))
3599 {
3600 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3601 {
3602 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3603 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3604 break;
3605 }
3606 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3607 {
3608                                    Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3609 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3610 break;
3611 }
3612 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3613 {
3614 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3615 break;
3616 }
3617 /* Fallthru (no break) */
3618 }
3619 pCtx->fExtrn = 0;
3620 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3621 ASMSetFlags(fEFlags);
3622 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3623 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3624 }
3625 }
3626 }
3627 }
3628#endif
3629
3630 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3631 {
3632 if (fWhat & CPUMCTX_EXTRN_CR0)
3633 {
3634 uint64_t u64Cr0;
3635 uint64_t u64Shadow;
3636 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3637 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3638#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3639 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3640 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3641#else
3642 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3643 {
3644 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3645 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3646 }
3647 else
3648 {
3649 /*
3650 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3651 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3652 * re-construct CR0. See @bugref{9180#c95} for details.
3653 */
3654 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3655 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3656 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3657 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3658 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3659 }
3660#endif
3661#ifndef IN_NEM_DARWIN
3662 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3663#endif
3664 CPUMSetGuestCR0(pVCpu, u64Cr0);
3665#ifndef IN_NEM_DARWIN
3666 VMMRZCallRing3Enable(pVCpu);
3667#endif
3668 }
3669
3670 if (fWhat & CPUMCTX_EXTRN_CR4)
3671 {
3672 uint64_t u64Cr4;
3673 uint64_t u64Shadow;
3674 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3675 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3676#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3677 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3678 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3679#else
3680 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3681 {
3682 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3683 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3684 }
3685 else
3686 {
3687 /*
3688 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3689 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3690 * re-construct CR4. See @bugref{9180#c95} for details.
3691 */
3692 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3693 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3694 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3695 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3696 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3697 }
3698#endif
3699 pCtx->cr4 = u64Cr4;
3700 }
3701
3702 if (fWhat & CPUMCTX_EXTRN_CR3)
3703 {
3704 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3705 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3706 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3707 && CPUMIsGuestPagingEnabledEx(pCtx)))
3708 {
3709 uint64_t u64Cr3;
3710 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3711 if (pCtx->cr3 != u64Cr3)
3712 {
3713 pCtx->cr3 = u64Cr3;
3714 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3715 }
3716
3717 /*
3718 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3719 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3720 */
3721 if (CPUMIsGuestInPAEModeEx(pCtx))
3722 {
3723 X86PDPE aPaePdpes[4];
3724 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3725 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3726 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3727 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3728 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3729 {
3730 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3731 /* PGM now updates PAE PDPTEs while updating CR3. */
3732 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3733 }
3734 }
3735 }
3736 }
3737 }
3738
3739#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3740 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3741 {
3742 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3743 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3744 {
3745 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3746 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3747 if (RT_SUCCESS(rc))
3748 { /* likely */ }
3749 else
3750 break;
3751 }
3752 }
3753#endif
3754 } while (0);
3755
3756 if (RT_SUCCESS(rc))
3757 {
3758 /* Update fExtrn. */
3759 pCtx->fExtrn &= ~fWhat;
3760
3761 /* If everything has been imported, clear the HM keeper bit. */
3762 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3763 {
3764#ifndef IN_NEM_DARWIN
3765 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3766#else
3767 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3768#endif
3769 Assert(!pCtx->fExtrn);
3770 }
3771 }
3772 }
3773#ifndef IN_NEM_DARWIN
3774 else
3775 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3776
3777 /*
3778 * Restore interrupts.
3779 */
3780 ASMSetFlags(fEFlags);
3781#endif
3782
3783 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3784
3785 if (RT_SUCCESS(rc))
3786 { /* likely */ }
3787 else
3788 return rc;
3789
3790 /*
3791 * Honor any pending CR3 updates.
3792 *
3793 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3794 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3795 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3796 *
3797     * The reason for such complicated handling is that VM-exits that call into PGM expect CR3 to be up-to-date and thus
3798 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3799 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3800 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3801 *
3802 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3803 *
3804 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3805 */
3806 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3807#ifndef IN_NEM_DARWIN
3808 && VMMRZCallRing3IsEnabled(pVCpu)
3809#endif
3810 )
3811 {
3812 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3813 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3814 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3815 }
3816
3817 return VINF_SUCCESS;
3818}
3819
3820
3821/**
3822 * Check per-VM and per-VCPU force flag actions that require us to go back to
3823 * ring-3 for one reason or another.
3824 *
3825 * @returns Strict VBox status code (i.e. informational status codes too)
3826 * @retval VINF_SUCCESS if we don't have any actions that require going back to
3827 * ring-3.
3828 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
3829 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
3830 * interrupts)
3831 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
3832 * all EMTs to be in ring-3.
3833 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
3834 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
3835 * to the EM loop.
3836 *
3837 * @param pVCpu The cross context virtual CPU structure.
3838 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
3839 * @param fStepping Whether we are single-stepping the guest using the
3840 * hypervisor debugger.
3841 *
3842 * @remarks This might cause nested-guest VM-exits; the caller must check if the guest
3843 * is no longer in VMX non-root mode.
3844 */
3845static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
3846{
3847#ifndef IN_NEM_DARWIN
3848 Assert(VMMRZCallRing3IsEnabled(pVCpu));
3849#endif
3850
3851 /*
3852 * Update pending interrupts into the APIC's IRR.
3853 */
3854 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3855 APICUpdatePendingInterrupts(pVCpu);
3856
3857 /*
3858 * Anything pending? Should be more likely than not if we're doing a good job.
3859 */
3860 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3861 if ( !fStepping
3862 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
3863 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
3864 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
3865 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
3866 return VINF_SUCCESS;
3867
3868    /* Pending PGM CR3 sync. */
3869    if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
3870 {
3871 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3872 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
3873 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
3874 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
3875 if (rcStrict != VINF_SUCCESS)
3876 {
3877 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
3878 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
3879 return rcStrict;
3880 }
3881 }
3882
3883 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
3884 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
3885 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
3886 {
3887 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
3888 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
3889 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
3890 return rc;
3891 }
3892
3893 /* Pending VM request packets, such as hardware interrupts. */
3894 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
3895 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
3896 {
3897 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
3898 Log4Func(("Pending VM request forcing us back to ring-3\n"));
3899 return VINF_EM_PENDING_REQUEST;
3900 }
3901
3902 /* Pending PGM pool flushes. */
3903 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
3904 {
3905 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
3906 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
3907 return VINF_PGM_POOL_FLUSH_PENDING;
3908 }
3909
3910 /* Pending DMA requests. */
3911 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
3912 {
3913 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
3914 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
3915 return VINF_EM_RAW_TO_R3;
3916 }
3917
3918#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3919 /*
3920 * Pending nested-guest events.
3921 *
3922     * Please note that the priority of these events is specified and important.
3923 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
3924 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
3925 */
3926 if (fIsNestedGuest)
3927 {
3928 /* Pending nested-guest APIC-write. */
3929 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
3930 {
3931 Log4Func(("Pending nested-guest APIC-write\n"));
3932 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
3933 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3934 return rcStrict;
3935 }
3936
3937 /* Pending nested-guest monitor-trap flag (MTF). */
3938 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
3939 {
3940 Log4Func(("Pending nested-guest MTF\n"));
3941 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
3942 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3943 return rcStrict;
3944 }
3945
3946 /* Pending nested-guest VMX-preemption timer expired. */
3947 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
3948 {
3949 Log4Func(("Pending nested-guest preempt timer\n"));
3950 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
3951 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3952 return rcStrict;
3953 }
3954 }
3955#else
3956 NOREF(fIsNestedGuest);
3957#endif
3958
3959 return VINF_SUCCESS;
3960}
3961
3962
3963/**
3964 * Converts any TRPM trap into a pending HM event. This is typically used when
3965 * entering from ring-3 (not longjmp returns).
3966 *
3967 * @param pVCpu The cross context virtual CPU structure.
3968 */
3969static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
3970{
3971 Assert(TRPMHasTrap(pVCpu));
3972 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3973
3974 uint8_t uVector;
3975 TRPMEVENT enmTrpmEvent;
3976 uint32_t uErrCode;
3977 RTGCUINTPTR GCPtrFaultAddress;
3978 uint8_t cbInstr;
3979 bool fIcebp;
3980
3981 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
3982 AssertRC(rc);
3983
3984 uint32_t u32IntInfo;
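        /* Assemble the interruption info: the vector goes into bits 7:0, HMTrpmEventTypeToVmxEventType()
           supplies the event-type bits in place, and bit 31 marks the info as valid. */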
3985 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
3986 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
3987
3988 rc = TRPMResetTrap(pVCpu);
3989 AssertRC(rc);
3990 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
3991 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
3992
3993 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
3994}
3995
3996
3997/**
3998 * Converts the pending HM event into a TRPM trap.
3999 *
4000 * @param pVCpu The cross context virtual CPU structure.
4001 */
4002static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4003{
4004 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4005
4006 /* If a trap was already pending, we did something wrong! */
4007 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4008
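        /* The pending event uses the VM-entry interruption-info layout, which matches the
           IDT-vectoring info layout for the fields decoded below (vector, type, error-code valid). */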
4009 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4010 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4011 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4012
4013 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4014
4015 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4016 AssertRC(rc);
4017
4018 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4019 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4020
4021 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4022 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4023 else
4024 {
4025 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4026 switch (uVectorType)
4027 {
4028 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4029 TRPMSetTrapDueToIcebp(pVCpu);
4030 RT_FALL_THRU();
4031 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4032 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4033 {
4034 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4035 || ( uVector == X86_XCPT_BP /* INT3 */
4036 || uVector == X86_XCPT_OF /* INTO */
4037 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4038 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4039 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4040 break;
4041 }
4042 }
4043 }
4044
4045 /* We're now done converting the pending event. */
4046 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4047}
4048
4049
4050/**
4051 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4052 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4053 *
4054 * @param pVCpu The cross context virtual CPU structure.
4055 * @param pVmcsInfo The VMCS info. object.
4056 */
4057static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4058{
4059 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4060 {
4061 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4062 {
4063 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4064 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4065 AssertRC(rc);
4066 }
4067    } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4068}
4069
4070
4071/**
4072 * Clears the interrupt-window exiting control in the VMCS.
4073 *
4074 * @param pVCpu The cross context virtual CPU structure.
4075 * @param pVmcsInfo The VMCS info. object.
4076 */
4077DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4078{
4079 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4080 {
4081 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4082 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4083 AssertRC(rc);
4084 }
4085}
4086
4087
4088/**
4089 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4090 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4091 *
4092 * @param pVCpu The cross context virtual CPU structure.
4093 * @param pVmcsInfo The VMCS info. object.
4094 */
4095static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4096{
4097 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4098 {
4099 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4100 {
4101 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4102 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4103 AssertRC(rc);
4104 Log4Func(("Setup NMI-window exiting\n"));
4105 }
4106 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4107}
4108
4109
4110/**
4111 * Clears the NMI-window exiting control in the VMCS.
4112 *
4113 * @param pVCpu The cross context virtual CPU structure.
4114 * @param pVmcsInfo The VMCS info. object.
4115 */
4116DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4117{
4118 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4119 {
4120 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4121 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4122 AssertRC(rc);
4123 }
4124}
4125
4126
4127/**
4128 * Injects an event into the guest upon VM-entry by updating the relevant fields
4129 * in the VM-entry area in the VMCS.
4130 *
4131 * @returns Strict VBox status code (i.e. informational status codes too).
4132 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4133 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4134 *
4135 * @param pVCpu The cross context virtual CPU structure.
4136 * @param pVmcsInfo The VMCS info object.
4137 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
4138 * @param pEvent The event being injected.
4139 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4140 *                          will be updated if necessary. This cannot be NULL.
4141 * @param fStepping Whether we're single-stepping guest execution and should
4142 * return VINF_EM_DBG_STEPPED if the event is injected
4143 * directly (registers modified by us, not by hardware on
4144 * VM-entry).
4145 */
4146static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4147 bool fStepping, uint32_t *pfIntrState)
4148{
4149 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4150 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4151 Assert(pfIntrState);
4152
4153#ifdef IN_NEM_DARWIN
4154 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4155#endif
4156
4157 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4158 uint32_t u32IntInfo = pEvent->u64IntInfo;
4159 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4160 uint32_t const cbInstr = pEvent->cbInstr;
4161 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4162 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4163 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4164
4165#ifdef VBOX_STRICT
4166 /*
4167 * Validate the error-code-valid bit for hardware exceptions.
4168 * No error codes for exceptions in real-mode.
4169 *
4170 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4171 */
4172 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4173 && !CPUMIsGuestInRealModeEx(pCtx))
4174 {
4175 switch (uVector)
4176 {
4177 case X86_XCPT_PF:
4178 case X86_XCPT_DF:
4179 case X86_XCPT_TS:
4180 case X86_XCPT_NP:
4181 case X86_XCPT_SS:
4182 case X86_XCPT_GP:
4183 case X86_XCPT_AC:
4184 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4185 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4186 RT_FALL_THRU();
4187 default:
4188 break;
4189 }
4190 }
4191
4192 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4193 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4194 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4195#endif
4196
4197 RT_NOREF(uVector);
4198 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4199 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4200 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4201 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4202 {
4203 Assert(uVector <= X86_XCPT_LAST);
4204 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4205 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4206 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4207 }
4208 else
4209 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4210
4211 /*
4212 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4213 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4214 * interrupt handler in the (real-mode) guest.
4215 *
4216 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4217 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4218 */
4219 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4220 {
4221#ifndef IN_NEM_DARWIN
4222 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4223#endif
4224 {
4225 /*
4226 * For CPUs with unrestricted guest execution enabled and with the guest
4227 * in real-mode, we must not set the deliver-error-code bit.
4228 *
4229 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4230 */
4231 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4232 }
4233#ifndef IN_NEM_DARWIN
4234 else
4235 {
4236 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4237 Assert(PDMVmmDevHeapIsEnabled(pVM));
4238 Assert(pVM->hm.s.vmx.pRealModeTSS);
4239 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4240
4241 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4242 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4243 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4244 AssertRCReturn(rc2, rc2);
4245
4246 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4247 size_t const cbIdtEntry = sizeof(X86IDTR16);
4248 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4249 {
4250 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4251 if (uVector == X86_XCPT_DF)
4252 return VINF_EM_RESET;
4253
4254 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4255 No error codes for exceptions in real-mode. */
4256 if (uVector == X86_XCPT_GP)
4257 {
4258 uint32_t const uXcptDfInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4259 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4260 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4261 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4262 HMEVENT EventXcptDf;
4263 RT_ZERO(EventXcptDf);
4264 EventXcptDf.u64IntInfo = uXcptDfInfo;
4265 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptDf, fStepping, pfIntrState);
4266 }
4267
4268 /*
4269 * If we're injecting an event with no valid IDT entry, inject a #GP.
4270 * No error codes for exceptions in real-mode.
4271 *
4272 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4273 */
4274 uint32_t const uXcptGpInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4275 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4276 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4277 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4278 HMEVENT EventXcptGp;
4279 RT_ZERO(EventXcptGp);
4280 EventXcptGp.u64IntInfo = uXcptGpInfo;
4281 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptGp, fStepping, pfIntrState);
4282 }
4283
4284 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4285 uint16_t uGuestIp = pCtx->ip;
4286 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4287 {
4288 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4289 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
4290 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4291 }
4292 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4293 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4294
4295 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4296 X86IDTR16 IdtEntry;
4297 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4298 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4299 AssertRCReturn(rc2, rc2);
4300
4301 /* Construct the stack frame for the interrupt/exception handler. */
4302 VBOXSTRICTRC rcStrict;
4303 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
4304 if (rcStrict == VINF_SUCCESS)
4305 {
4306 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4307 if (rcStrict == VINF_SUCCESS)
4308 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4309 }
4310
4311 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4312 if (rcStrict == VINF_SUCCESS)
4313 {
4314 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4315 pCtx->rip = IdtEntry.offSel;
4316 pCtx->cs.Sel = IdtEntry.uSel;
4317 pCtx->cs.ValidSel = IdtEntry.uSel;
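                    /* Real-mode CS base is the selector shifted left by 4; cbIdtEntry (sizeof(X86IDTR16))
                       is 4 here, so the shift below is equivalent. */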
4318 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4319 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4320 && uVector == X86_XCPT_PF)
4321 pCtx->cr2 = GCPtrFault;
4322
4323 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4324 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4325 | HM_CHANGED_GUEST_RSP);
4326
4327 /*
4328 * If we delivered a hardware exception (other than an NMI) and if there was
4329 * block-by-STI in effect, we should clear it.
4330 */
4331 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4332 {
4333 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4334 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4335 Log4Func(("Clearing inhibition due to STI\n"));
4336 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4337 }
4338
4339 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4340 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4341
4342 /*
4343 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4344 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4345 */
4346 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4347
4348 /*
4349 * If we eventually support nested-guest execution without unrestricted guest execution,
4350 * we should set fInterceptEvents here.
4351 */
4352 Assert(!fIsNestedGuest);
4353
4354 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4355 if (fStepping)
4356 rcStrict = VINF_EM_DBG_STEPPED;
4357 }
4358 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4359 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4360 return rcStrict;
4361 }
4362#else
4363 RT_NOREF(pVmcsInfo);
4364#endif
4365 }
4366
4367 /*
4368 * Validate.
4369 */
4370 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4371 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4372
4373 /*
4374 * Inject the event into the VMCS.
4375 */
4376 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4377 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4378 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4379 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4380 AssertRC(rc);
4381
4382 /*
4383 * Update guest CR2 if this is a page-fault.
4384 */
4385 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4386 pCtx->cr2 = GCPtrFault;
4387
4388 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4389 return VINF_SUCCESS;
4390}
4391
4392
4393/**
4394 * Evaluates the event to be delivered to the guest and sets it as the pending
4395 * event.
4396 *
4397 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4398 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4399 * NOT restore these force-flags.
4400 *
4401 * @returns Strict VBox status code (i.e. informational status codes too).
4402 * @param pVCpu The cross context virtual CPU structure.
4403 * @param pVmcsInfo The VMCS information structure.
4404 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
4405 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4406 */
4407static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4408{
4409 Assert(pfIntrState);
4410 Assert(!TRPMHasTrap(pVCpu));
4411
4412 /*
4413 * Compute/update guest-interruptibility state related FFs.
4414 * The FFs will be used below while evaluating events to be injected.
4415 */
4416 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4417
4418 /*
4419 * Evaluate if a new event needs to be injected.
4420 * An event that's already pending has already performed all necessary checks.
4421 */
4422 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4423 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
4424 {
4425 /** @todo SMI. SMIs take priority over NMIs. */
4426
4427 /*
4428 * NMIs.
4429 * NMIs take priority over external interrupts.
4430 */
4431#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4432 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4433#endif
4434 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4435 {
4436 /*
4437 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4438 *
4439 * For a nested-guest, the FF always indicates the outer guest's ability to
4440 * receive an NMI while the guest-interruptibility state bit depends on whether
4441 * the nested-hypervisor is using virtual-NMIs.
4442 */
4443 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
4444 {
4445#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4446 if ( fIsNestedGuest
4447 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4448 return IEMExecVmxVmexitXcptNmi(pVCpu);
4449#endif
4450 vmxHCSetPendingXcptNmi(pVCpu);
4451 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4452 Log4Func(("NMI pending injection\n"));
4453
4454 /* We've injected the NMI, bail. */
4455 return VINF_SUCCESS;
4456 }
4457 else if (!fIsNestedGuest)
4458 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4459 }
4460
4461 /*
4462 * External interrupts (PIC/APIC).
4463 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4464 * We cannot re-request the interrupt from the controller again.
4465 */
4466 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4467 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4468 {
4469 Assert(!DBGFIsStepping(pVCpu));
4470 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4471 AssertRC(rc);
4472
4473 /*
4474 * We must not check EFLAGS directly when executing a nested-guest, use
4475 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
4476 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4477 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4478 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4479 *
4480 * See Intel spec. 25.4.1 "Event Blocking".
4481 */
4482 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4483 {
4484#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4485 if ( fIsNestedGuest
4486 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4487 {
4488 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4489 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4490 return rcStrict;
4491 }
4492#endif
4493 uint8_t u8Interrupt;
4494 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4495 if (RT_SUCCESS(rc))
4496 {
4497#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4498 if ( fIsNestedGuest
4499 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4500 {
4501 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
4502 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4503 return rcStrict;
4504 }
4505#endif
4506 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4507 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
4508 }
4509 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4510 {
4511 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
4512
4513 if ( !fIsNestedGuest
4514 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
4515 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
4516 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
4517
4518 /*
4519 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4520 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4521 * need to re-set this force-flag here.
4522 */
4523 }
4524 else
4525 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4526
4527 /* We've injected the interrupt or taken necessary action, bail. */
4528 return VINF_SUCCESS;
4529 }
4530 if (!fIsNestedGuest)
4531 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4532 }
4533 }
4534 else if (!fIsNestedGuest)
4535 {
4536 /*
4537 * An event is being injected or we are in an interrupt shadow. Check if another event is
4538 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
4539 * the pending event.
4540 */
4541 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4542 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4543 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4544 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4545 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4546 }
4547 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
4548
4549 return VINF_SUCCESS;
4550}
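/*
 * Illustrative sketch (hypothetical caller): a run-loop is expected to pair the evaluation
 * above with the injection function further below, feeding the interruptibility state
 * returned by the former into the latter. The local variable names here are placeholders;
 * the signatures match the functions in this file.
 *
 *     uint32_t fIntrState = 0;
 *     VBOXSTRICTRC rcStrict = vmxHCEvaluatePendingEvent(pVCpu, pVmcsInfo, fIsNestedGuest, &fIntrState);
 *     if (rcStrict == VINF_SUCCESS)
 *         rcStrict = vmxHCInjectPendingEvent(pVCpu, pVmcsInfo, fIsNestedGuest, fIntrState, fStepping);
 */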
4551
4552
4553/**
4554 * Injects any pending events into the guest if the guest is in a state to
4555 * receive them.
4556 *
4557 * @returns Strict VBox status code (i.e. informational status codes too).
4558 * @param pVCpu The cross context virtual CPU structure.
4559 * @param pVmcsInfo The VMCS information structure.
4560 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
4561 * @param fIntrState The VT-x guest-interruptibility state.
4562 * @param fStepping Whether we are single-stepping the guest using the
4563 * hypervisor debugger and should return
4564 * VINF_EM_DBG_STEPPED if the event was dispatched
4565 * directly.
4566 */
4567static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
4568 uint32_t fIntrState, bool fStepping)
4569{
4570 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
4571#ifndef IN_NEM_DARWIN
4572 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4573#endif
4574
4575#ifdef VBOX_STRICT
4576 /*
4577 * Verify guest-interruptibility state.
4578 *
4579 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
4580 * since injecting an event may modify the interruptibility state and we must thus always
4581 * use fIntrState.
4582 */
4583 {
4584 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
4585 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
4586 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
4587 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
4588 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
4589 Assert(!TRPMHasTrap(pVCpu));
4590 NOREF(fBlockMovSS); NOREF(fBlockSti);
4591 }
4592#endif
4593
4594 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4595 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
4596 {
4597 /*
4598 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
4599 * pending even while injecting an event and in this case, we want a VM-exit as soon as
4600 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
4601 *
4602 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
4603 */
4604 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
4605#ifdef VBOX_STRICT
4606 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4607 {
4608 Assert(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
4609 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4610 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4611 }
4612 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
4613 {
4614 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
4615 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4616 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4617 }
4618#endif
4619 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
4620 uIntType));
4621
4622 /*
4623 * Inject the event and get any changes to the guest-interruptibility state.
4624 *
4625 * The guest-interruptibility state may need to be updated if we inject the event
4626 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
4627 */
4628 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
4629 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
4630
4631 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4632 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
4633 else
4634 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
4635 }
4636
4637 /*
4638 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
4639 * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
4640 */
4641 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
4642 && !fIsNestedGuest)
4643 {
4644 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
4645
4646 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4647 {
4648 /*
4649 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
4650 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
4651 */
4652 Assert(!DBGFIsStepping(pVCpu));
4653 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_TF);
4654 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
4655 AssertRC(rc);
4656 }
4657 else
4658 {
4659 /*
4660 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
4661 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
4662 * we take care of this case in vmxHCExportSharedDebugState, as well as the case
4663 * where we use MTF, so just make sure it's called before executing guest-code.
4664 */
4665 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
4666 }
4667 }
4668 /* else: for nested-guests, this is currently handled while merging VMCS controls. */
4669
4670 /*
4671 * Finally, update the guest-interruptibility state.
4672 *
4673 * This is required for the real-on-v86 software interrupt injection, for
4674 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
4675 */
4676 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
4677 AssertRC(rc);
4678
4679 /*
4680 * There's no need to clear the VM-entry interruption-information field here if we're not
4681 * injecting anything. VT-x clears the valid bit on every VM-exit.
4682 *
4683 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
4684 */
4685
4686 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
4687 return rcStrict;
4688}
4689
4690
4691/**
4692 * Tries to determine what part of the guest-state VT-x has deemed as invalid
4693 * and update error record fields accordingly.
4694 *
4695 * @returns VMX_IGS_* error codes.
4696 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
4697 * wrong with the guest state.
4698 *
4699 * @param pVCpu The cross context virtual CPU structure.
4700 * @param pVmcsInfo The VMCS info. object.
4701 *
4702 * @remarks This function assumes our cache of the VMCS controls
4703 * is valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
4704 */
4705static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
4706{
4707#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
4708#define HMVMX_CHECK_BREAK(expr, err) do { \
4709 if (!(expr)) { uError = (err); break; } \
4710 } while (0)
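/*
 * Usage sketch (mirrors the checks further below): both macros rely on the enclosing
 * do { ... } while (0) loop, so the first failing check records its VMX_IGS_* code in
 * uError and breaks out, e.g.:
 *
 *     HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
 */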
4711
4712 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4713 uint32_t uError = VMX_IGS_ERROR;
4714 uint32_t u32IntrState = 0;
4715#ifndef IN_NEM_DARWIN
4716 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4717 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
4718#else
4719 bool const fUnrestrictedGuest = true;
4720#endif
4721 do
4722 {
4723 int rc;
4724
4725 /*
4726 * Guest-interruptibility state.
4727 *
4728 * Read this first so that even if a check that does not require the
4729 * guest-interruptibility state fails early, the value recorded in the error
4730 * record below still reflects the actual VMCS contents, avoiding further confusion.
4731 */
4732 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
4733 AssertRC(rc);
4734
4735 uint32_t u32Val;
4736 uint64_t u64Val;
4737
4738 /*
4739 * CR0.
4740 */
4741 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4742 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
4743 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
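 /* fSetCr0 = CR0 bits the CPU requires to be 1; fZapCr0 = CR0 bits allowed to be 1 (everything outside it must be 0). */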
4744 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
4745 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
4746 if (fUnrestrictedGuest)
4747 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
4748
4749 uint64_t u64GuestCr0;
4750 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
4751 AssertRC(rc);
4752 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
4753 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
4754 if ( !fUnrestrictedGuest
4755 && (u64GuestCr0 & X86_CR0_PG)
4756 && !(u64GuestCr0 & X86_CR0_PE))
4757 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
4758
4759 /*
4760 * CR4.
4761 */
4762 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4763 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
4764 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
4765
4766 uint64_t u64GuestCr4;
4767 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
4768 AssertRC(rc);
4769 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
4770 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
4771
4772 /*
4773 * IA32_DEBUGCTL MSR.
4774 */
4775 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
4776 AssertRC(rc);
4777 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4778 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
4779 {
4780 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
4781 }
4782 uint64_t u64DebugCtlMsr = u64Val;
4783
4784#ifdef VBOX_STRICT
4785 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
4786 AssertRC(rc);
4787 Assert(u32Val == pVmcsInfo->u32EntryCtls);
4788#endif
4789 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4790
4791 /*
4792 * RIP and RFLAGS.
4793 */
4794 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
4795 AssertRC(rc);
4796 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
4797 if ( !fLongModeGuest
4798 || !pCtx->cs.Attr.n.u1Long)
4799 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
4800 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
4801 * must be identical if the "IA-32e mode guest" VM-entry
4802 * control is 1 and CS.L is 1. No check applies if the
4803 * CPU supports 64 linear-address bits. */
4804
4805 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
4806 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
4807 AssertRC(rc);
4808 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
4809 VMX_IGS_RFLAGS_RESERVED);
4810 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
4811 uint32_t const u32Eflags = u64Val;
4812
4813 if ( fLongModeGuest
4814 || ( fUnrestrictedGuest
4815 && !(u64GuestCr0 & X86_CR0_PE)))
4816 {
4817 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
4818 }
4819
4820 uint32_t u32EntryInfo;
4821 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
4822 AssertRC(rc);
4823 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
4824 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
4825
4826 /*
4827 * 64-bit checks.
4828 */
4829 if (fLongModeGuest)
4830 {
4831 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
4832 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
4833 }
4834
4835 if ( !fLongModeGuest
4836 && (u64GuestCr4 & X86_CR4_PCIDE))
4837 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
4838
4839 /** @todo CR3 field must be such that bits 63:52 and bits in the range
4840 * 51:32 beyond the processor's physical-address width are 0. */
4841
4842 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4843 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
4844 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
4845
4846#ifndef IN_NEM_DARWIN
4847 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
4848 AssertRC(rc);
4849 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
4850
4851 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
4852 AssertRC(rc);
4853 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
4854#endif
4855
4856 /*
4857 * PERF_GLOBAL MSR.
4858 */
4859 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
4860 {
4861 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
4862 AssertRC(rc);
4863 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
4864 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
4865 }
4866
4867 /*
4868 * PAT MSR.
4869 */
4870 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
4871 {
4872 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
4873 AssertRC(rc);
4874 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xf8f8f8f8f8f8f8f8)), VMX_IGS_PAT_MSR_RESERVED); /* Bits 7:3 of each entry MBZ. */
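 /* Each byte of the PAT MSR is one of its 8 entries; every entry must hold a valid memory type. */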
4875 for (unsigned i = 0; i < 8; i++)
4876 {
4877 uint8_t u8Val = (u64Val & 0xff);
4878 if ( u8Val != 0 /* UC */
4879 && u8Val != 1 /* WC */
4880 && u8Val != 4 /* WT */
4881 && u8Val != 5 /* WP */
4882 && u8Val != 6 /* WB */
4883 && u8Val != 7 /* UC- */)
4884 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
4885 u64Val >>= 8;
4886 }
4887 }
4888
4889 /*
4890 * EFER MSR.
4891 */
4892 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
4893 {
4894 Assert(g_fHmVmxSupportsVmcsEfer);
4895 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
4896 AssertRC(rc);
4897 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
4898 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
4899 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
4900 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
4901 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
4902 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
4903 * iemVmxVmentryCheckGuestState(). */
4904 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4905 || !(u64GuestCr0 & X86_CR0_PG)
4906 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
4907 VMX_IGS_EFER_LMA_LME_MISMATCH);
4908 }
4909
4910 /*
4911 * Segment registers.
4912 */
4913 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
4914 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
4915 if (!(u32Eflags & X86_EFL_VM))
4916 {
4917 /* CS */
4918 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
4919 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
4920 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
4921 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
4922 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4923 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
4924 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4925 /* CS cannot be loaded with NULL in protected mode. */
4926 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
4927 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
4928 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
4929 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
4930 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
4931 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
4932 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
4933 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
4934 else
4935 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
4936
4937 /* SS */
4938 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4939 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
4940 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
4941 if ( !(pCtx->cr0 & X86_CR0_PE)
4942 || pCtx->cs.Attr.n.u4Type == 3)
4943 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
4944
4945 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
4946 {
4947 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
4948 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
4949 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
4950 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
4951 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4952 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4953 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
4954 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4955 }
4956
4957 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
4958 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4959 {
4960 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
4961 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
4962 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4963 || pCtx->ds.Attr.n.u4Type > 11
4964 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4965 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
4966 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
4967 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4968 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4969 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
4970 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4971 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4972 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
4973 }
4974 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4975 {
4976 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
4977 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
4978 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4979 || pCtx->es.Attr.n.u4Type > 11
4980 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4981 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
4982 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
4983 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
4984 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
4985 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
4986 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
4987 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4988 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
4989 }
4990 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
4991 {
4992 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
4993 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
4994 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4995 || pCtx->fs.Attr.n.u4Type > 11
4996 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
4997 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
4998 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
4999 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5000 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5001 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5002 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5003 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5004 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5005 }
5006 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5007 {
5008 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5009 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5010 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5011 || pCtx->gs.Attr.n.u4Type > 11
5012 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5013 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5014 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5015 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5016 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5017 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5018 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5019 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5020 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5021 }
5022 /* 64-bit capable CPUs. */
5023 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5024 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5025 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5026 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5027 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5028 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5029 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5030 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5031 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5032 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5033 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5034 }
5035 else
5036 {
5037 /* V86 mode checks. */
5038 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5039 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5040 {
5041 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5042 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5043 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5044 }
5045 else
5046 {
5047 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5048 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5049 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5050 }
5051
5052 /* CS */
5053 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5054 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5055 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5056 /* SS */
5057 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5058 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5059 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5060 /* DS */
5061 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5062 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5063 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5064 /* ES */
5065 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5066 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5067 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5068 /* FS */
5069 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5070 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5071 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5072 /* GS */
5073 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5074 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5075 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5076 /* 64-bit capable CPUs. */
5077 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5078 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5079 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5080 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5081 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5082 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5083 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5084 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5085 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5086 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5087 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5088 }
5089
5090 /*
5091 * TR.
5092 */
5093 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5094 /* 64-bit capable CPUs. */
5095 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5096 if (fLongModeGuest)
5097 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5098 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5099 else
5100 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5101 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5102 VMX_IGS_TR_ATTR_TYPE_INVALID);
5103 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5104 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5105 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5106 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5107 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5108 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5109 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5110 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5111
5112 /*
5113 * GDTR and IDTR (64-bit capable checks).
5114 */
5115 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5116 AssertRC(rc);
5117 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5118
5119 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5120 AssertRC(rc);
5121 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5122
5123 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5124 AssertRC(rc);
5125 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5126
5127 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5128 AssertRC(rc);
5129 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5130
5131 /*
5132 * Guest Non-Register State.
5133 */
5134 /* Activity State. */
5135 uint32_t u32ActivityState;
5136 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5137 AssertRC(rc);
5138 HMVMX_CHECK_BREAK( !u32ActivityState
5139 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5140 VMX_IGS_ACTIVITY_STATE_INVALID);
5141 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5142 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5143
5144 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5145 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5146 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5147
5148 /** @todo Activity state and injecting interrupts. Left as a todo since we
5149 * currently don't use any activity state other than ACTIVE. */
5150
5151 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5152 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5153
5154 /* Guest interruptibility-state. */
5155 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5156 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5157 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5158 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5159 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5160 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5161 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5162 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5163 {
5164 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5165 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5166 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5167 }
5168 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5169 {
5170 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5171 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5172 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5173 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5174 }
5175 /** @todo Assumes the processor is not in SMM. */
5176 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5177 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5178 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5179 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5180 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5181 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5182 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5183 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5184
5185 /* Pending debug exceptions. */
5186 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5187 AssertRC(rc);
5188 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5189 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5190 u32Val = u64Val; /* For the pending debug exception checks below. */
5191
5192 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5193 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5194 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5195 {
5196 if ( (u32Eflags & X86_EFL_TF)
5197 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5198 {
5199 /* Bit 14 is PendingDebug.BS. */
5200 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5201 }
5202 if ( !(u32Eflags & X86_EFL_TF)
5203 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5204 {
5205 /* Bit 14 is PendingDebug.BS. */
5206 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5207 }
5208 }
5209
5210#ifndef IN_NEM_DARWIN
5211 /* VMCS link pointer. */
5212 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5213 AssertRC(rc);
5214 if (u64Val != UINT64_C(0xffffffffffffffff))
5215 {
5216 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5217 /** @todo Bits beyond the processor's physical-address width MBZ. */
5218 /** @todo SMM checks. */
5219 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5220 Assert(pVmcsInfo->pvShadowVmcs);
5221 VMXVMCSREVID VmcsRevId;
5222 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5223 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5224 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5225 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5226 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5227 }
5228
5229 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5230 * not using nested paging? */
5231 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5232 && !fLongModeGuest
5233 && CPUMIsGuestInPAEModeEx(pCtx))
5234 {
5235 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5236 AssertRC(rc);
5237 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5238
5239 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5240 AssertRC(rc);
5241 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5242
5243 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5244 AssertRC(rc);
5245 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5246
5247 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5248 AssertRC(rc);
5249 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5250 }
5251#endif
5252
5253 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5254 if (uError == VMX_IGS_ERROR)
5255 uError = VMX_IGS_REASON_NOT_FOUND;
5256 } while (0);
5257
5258 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5259 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5260 return uError;
5261
5262#undef HMVMX_ERROR_BREAK
5263#undef HMVMX_CHECK_BREAK
5264}
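/*
 * Illustrative sketch (hypothetical caller): the function above is purely diagnostic. A
 * typical use is to run it after a VM-entry failed with an invalid-guest-state error and
 * log whichever VMX_IGS_* code it narrowed the failure down to.
 *
 *     uint32_t const uDiag = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
 *     if (uDiag != VMX_IGS_REASON_NOT_FOUND)
 *         Log4(("VM-entry failure narrowed down to %#x\n", uDiag));
 */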
5265
5266
5267#ifndef HMVMX_USE_FUNCTION_TABLE
5268/**
5269 * Handles a guest VM-exit from hardware-assisted VMX execution.
5270 *
5271 * @returns Strict VBox status code (i.e. informational status codes too).
5272 * @param pVCpu The cross context virtual CPU structure.
5273 * @param pVmxTransient The VMX-transient structure.
5274 */
5275DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5276{
5277#ifdef DEBUG_ramshankar
5278# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5279 do { \
5280 if (a_fSave != 0) \
5281 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); \
5282 VBOXSTRICTRC rcStrict = a_CallExpr; \
5283 if (a_fSave != 0) \
5284 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5285 return rcStrict; \
5286 } while (0)
5287#else
5288# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5289#endif
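 /* Note: with DEBUG_ramshankar, a non-zero a_fSave imports the entire guest state before the
    handler runs and marks it all as changed afterwards; the default variant simply tail-calls
    the handler. */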
5290 uint32_t const uExitReason = pVmxTransient->uExitReason;
5291 switch (uExitReason)
5292 {
5293 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5294 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5295 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5296 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5297 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5298 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5299 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5300 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5301 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5302 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5303 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5304 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5305 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5306 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5307 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5308 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5309 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5310 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5311 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5312 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5313 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5314 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5315 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5316 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5317 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5318 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5319 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5320 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5321 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5322 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5323#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5324 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5325 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5326 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5327 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5328 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5329 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5330 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5331 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5332 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5333 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5334#else
5335 case VMX_EXIT_VMCLEAR:
5336 case VMX_EXIT_VMLAUNCH:
5337 case VMX_EXIT_VMPTRLD:
5338 case VMX_EXIT_VMPTRST:
5339 case VMX_EXIT_VMREAD:
5340 case VMX_EXIT_VMRESUME:
5341 case VMX_EXIT_VMWRITE:
5342 case VMX_EXIT_VMXOFF:
5343 case VMX_EXIT_VMXON:
5344 case VMX_EXIT_INVVPID:
5345 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5346#endif
5347#if defined(VBOX_WITH_NESTED_HWVIRT_VMX) && defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT)
5348 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5349#else
5350 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5351#endif
5352
5353 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5354 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5355 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5356
5357 case VMX_EXIT_INIT_SIGNAL:
5358 case VMX_EXIT_SIPI:
5359 case VMX_EXIT_IO_SMI:
5360 case VMX_EXIT_SMI:
5361 case VMX_EXIT_ERR_MSR_LOAD:
5362 case VMX_EXIT_ERR_MACHINE_CHECK:
5363 case VMX_EXIT_PML_FULL:
5364 case VMX_EXIT_VIRTUALIZED_EOI:
5365 case VMX_EXIT_GDTR_IDTR_ACCESS:
5366 case VMX_EXIT_LDTR_TR_ACCESS:
5367 case VMX_EXIT_APIC_WRITE:
5368 case VMX_EXIT_RDRAND:
5369 case VMX_EXIT_RSM:
5370 case VMX_EXIT_VMFUNC:
5371 case VMX_EXIT_ENCLS:
5372 case VMX_EXIT_RDSEED:
5373 case VMX_EXIT_XSAVES:
5374 case VMX_EXIT_XRSTORS:
5375 case VMX_EXIT_UMWAIT:
5376 case VMX_EXIT_TPAUSE:
5377 case VMX_EXIT_LOADIWKEY:
5378 default:
5379 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5380 }
5381#undef VMEXIT_CALL_RET
5382}
5383#endif /* !HMVMX_USE_FUNCTION_TABLE */
5384
5385
5386#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5387/**
5388 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5389 *
5390 * @returns Strict VBox status code (i.e. informational status codes too).
5391 * @param pVCpu The cross context virtual CPU structure.
5392 * @param pVmxTransient The VMX-transient structure.
5393 */
5394DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5395{
5396 uint32_t const uExitReason = pVmxTransient->uExitReason;
5397 switch (uExitReason)
5398 {
5399# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5400 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient);
5401 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolationNested(pVCpu, pVmxTransient);
5402# else
5403 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
5404 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
5405# endif
5406 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5407 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5408 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5409
5410 /*
5411 * We shouldn't direct host physical interrupts to the nested-guest.
5412 */
5413 case VMX_EXIT_EXT_INT:
5414 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5415
5416 /*
5417 * Instructions that cause VM-exits unconditionally or whose exit condition is
5418 * taken solely from the nested hypervisor (meaning if the VM-exit
5419 * happens, it's guaranteed to be a nested-guest VM-exit).
5420 *
5421 * - Provides VM-exit instruction length ONLY.
5422 */
5423 case VMX_EXIT_CPUID: /* Unconditional. */
5424 case VMX_EXIT_VMCALL:
5425 case VMX_EXIT_GETSEC:
5426 case VMX_EXIT_INVD:
5427 case VMX_EXIT_XSETBV:
5428 case VMX_EXIT_VMLAUNCH:
5429 case VMX_EXIT_VMRESUME:
5430 case VMX_EXIT_VMXOFF:
5431 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5432 case VMX_EXIT_VMFUNC:
5433 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5434
5435 /*
5436 * Instructions that cause VM-exits unconditionally or whose exit condition is
5437 * taken solely from the nested hypervisor (meaning if the VM-exit
5438 * happens, it's guaranteed to be a nested-guest VM-exit).
5439 *
5440 * - Provides VM-exit instruction length.
5441 * - Provides VM-exit information.
5442 * - Optionally provides Exit qualification.
5443 *
5444 * Since Exit qualification is 0 for all VM-exits where it is not
5445 * applicable, reading and passing it to the guest should produce
5446 * defined behavior.
5447 *
5448 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5449 */
5450 case VMX_EXIT_INVEPT: /* Unconditional. */
5451 case VMX_EXIT_INVVPID:
5452 case VMX_EXIT_VMCLEAR:
5453 case VMX_EXIT_VMPTRLD:
5454 case VMX_EXIT_VMPTRST:
5455 case VMX_EXIT_VMXON:
5456 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5457 case VMX_EXIT_LDTR_TR_ACCESS:
5458 case VMX_EXIT_RDRAND:
5459 case VMX_EXIT_RDSEED:
5460 case VMX_EXIT_XSAVES:
5461 case VMX_EXIT_XRSTORS:
5462 case VMX_EXIT_UMWAIT:
5463 case VMX_EXIT_TPAUSE:
5464 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5465
5466 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5467 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5468 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5469 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5470 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
5471 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
5472 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
5473 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
5474 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
5475 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
5476 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
5477 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
5478 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
5479 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
5480 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
5481 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
5482 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
5483 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
5484 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
5485
5486 case VMX_EXIT_PREEMPT_TIMER:
5487 {
5488 /** @todo NSTVMX: Preempt timer. */
5489 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
5490 }
5491
5492 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
5493 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
5494
5495 case VMX_EXIT_VMREAD:
5496 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
5497
5498 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
5499 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
5500
5501 case VMX_EXIT_INIT_SIGNAL:
5502 case VMX_EXIT_SIPI:
5503 case VMX_EXIT_IO_SMI:
5504 case VMX_EXIT_SMI:
5505 case VMX_EXIT_ERR_MSR_LOAD:
5506 case VMX_EXIT_ERR_MACHINE_CHECK:
5507 case VMX_EXIT_PML_FULL:
5508 case VMX_EXIT_RSM:
5509 default:
5510 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5511 }
5512}
5513#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5514
5515
5516/** @name VM-exit helpers.
5517 * @{
5518 */
5519/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5520/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
5521/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5522
5523/** Macro for VM-exits called unexpectedly. */
5524#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
5525 do { \
5526 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
5527 return VERR_VMX_UNEXPECTED_EXIT; \
5528 } while (0)
5529
5530#ifdef VBOX_STRICT
5531# ifndef IN_NEM_DARWIN
5532/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
5533# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
5534 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
5535
5536# define HMVMX_ASSERT_PREEMPT_CPUID() \
5537 do { \
5538 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
5539 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
5540 } while (0)
5541
5542# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5543 do { \
5544 AssertPtr((a_pVCpu)); \
5545 AssertPtr((a_pVmxTransient)); \
5546 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
5547 Assert((a_pVmxTransient)->pVmcsInfo); \
5548 Assert(ASMIntAreEnabled()); \
5549 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5550 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
5551 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5552 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5553 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
5554 HMVMX_ASSERT_PREEMPT_CPUID(); \
5555 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5556 } while (0)
5557# else
5558# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
5559# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
5560# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5561 do { \
5562 AssertPtr((a_pVCpu)); \
5563 AssertPtr((a_pVmxTransient)); \
5564 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
5565 Assert((a_pVmxTransient)->pVmcsInfo); \
5566 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5567 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5568 } while (0)
5569# endif
5570
5571# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5572 do { \
5573 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
5574 Assert((a_pVmxTransient)->fIsNestedGuest); \
5575 } while (0)
5576
5577# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5578 do { \
5579 Log4Func(("\n")); \
5580 } while (0)
5581#else
5582# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5583 do { \
5584 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5585 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
5586 } while (0)
5587
5588# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5589 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
5590
5591# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
5592#endif
5593
5594#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5595/** Macro that performs the necessary privilege checks and handles intercepted VM-exits
5596 * for guests that attempted to execute a VMX instruction. */
5597# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
5598 do \
5599 { \
5600 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
5601 if (rcStrictTmp == VINF_SUCCESS) \
5602 { /* likely */ } \
5603 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5604 { \
5605 Assert((a_pVCpu)->hm.s.Event.fPending); \
5606 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
5607 return VINF_SUCCESS; \
5608 } \
5609 else \
5610 { \
5611 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
5612 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
5613 } \
5614 } while (0)
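/*
 * Usage sketch (hypothetical handler): the macro above is meant to sit at the top of a
 * VMX-instruction exit handler, before the instruction is emulated, e.g.:
 *
 *     HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
 */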
5615
5616/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
5617# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
5618 do \
5619 { \
5620 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
5621 (a_pGCPtrEffAddr)); \
5622 if (rcStrictTmp == VINF_SUCCESS) \
5623 { /* likely */ } \
5624 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5625 { \
5626 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
5627 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
5628 NOREF(uXcptTmp); \
5629 return VINF_SUCCESS; \
5630 } \
5631 else \
5632 { \
5633 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
5634 return rcStrictTmp; \
5635 } \
5636 } while (0)
5637#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5638
5639
5640/**
5641 * Advances the guest RIP by the specified number of bytes.
5642 *
5643 * @param pVCpu The cross context virtual CPU structure.
5644 * @param cbInstr Number of bytes to advance the RIP by.
5645 *
5646 * @remarks No-long-jump zone!!!
5647 */
5648DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
5649{
5650 /* Advance the RIP. */
5651 pVCpu->cpum.GstCtx.rip += cbInstr;
5652 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
5653
5654 /* Update interrupt inhibition. */
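 /* The STI/MOV-SS inhibition only covers the instruction boundary right after the one that
    set it, so once RIP no longer matches the recorded inhibition address the force-flag can
    be cleared. */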
5655 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
5656 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
5657 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
5658}
5659
5660
5661/**
5662 * Advances the guest RIP after reading it from the VMCS.
5663 *
5664 * @returns VBox status code, no informational status codes.
5665 * @param pVCpu The cross context virtual CPU structure.
5666 * @param pVmxTransient The VMX-transient structure.
5667 *
5668 * @remarks No-long-jump zone!!!
5669 */
5670static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5671{
5672 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
5673 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
5674 AssertRCReturn(rc, rc);
5675
5676 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
5677 return VINF_SUCCESS;
5678}
5679
5680
5681/**
5682 * Handles a condition that occurred while delivering an event through the guest or
5683 * nested-guest IDT.
5684 *
5685 * @returns Strict VBox status code (i.e. informational status codes too).
5686 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5687 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
5688 * to continue execution of the guest which will deliver the \#DF.
5689 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5690 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
5691 *
5692 * @param pVCpu The cross context virtual CPU structure.
5693 * @param pVmxTransient The VMX-transient structure.
5694 *
5695 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
5696 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
5697 * is due to an EPT violation, PML full or SPP-related event.
5698 *
5699 * @remarks No-long-jump zone!!!
5700 */
5701static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5702{
5703 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
5704 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
5705 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5706 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5707 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5708 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
5709
5710 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5711 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5712 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
5713 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
5714 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
5715 {
5716 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
5717 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
5718
5719 /*
5720 * If the event was a software interrupt (generated with INT n) or a software exception
5721 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
5722 * can handle the VM-exit and continue guest execution which will re-execute the
5723 * instruction rather than re-injecting the exception, as that can cause premature
5724 * trips to ring-3 before injection and involve TRPM which currently has no way of
5725 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
5726 * the problem).
5727 */
5728 IEMXCPTRAISE enmRaise;
5729 IEMXCPTRAISEINFO fRaiseInfo;
5730 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5731 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5732 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
5733 {
5734 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
5735 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5736 }
5737 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
5738 {
5739 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
5740 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
5741 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
5742
5743 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
5744 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
5745
5746 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
5747
5748 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
5749 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
5750 {
5751 pVmxTransient->fVectoringPF = true;
5752 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5753 }
5754 }
5755 else
5756 {
5757 /*
5758 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
5759 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
5760 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
5761 */
5762 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5763 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5764 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
5765 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5766 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5767 }
5768
5769 /*
5770 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
5771 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
5772 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
5773 * subsequent VM-entry would fail, see @bugref{7445}.
5774 *
5775 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
5776 */
5777 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5778 && enmRaise == IEMXCPTRAISE_PREV_EVENT
5779 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5780 && CPUMIsGuestNmiBlocking(pVCpu))
5781 {
5782 CPUMSetGuestNmiBlocking(pVCpu, false);
5783 }
5784
5785 switch (enmRaise)
5786 {
5787 case IEMXCPTRAISE_CURRENT_XCPT:
5788 {
5789 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
5790 Assert(rcStrict == VINF_SUCCESS);
5791 break;
5792 }
5793
5794 case IEMXCPTRAISE_PREV_EVENT:
5795 {
5796 uint32_t u32ErrCode;
5797 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
5798 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5799 else
5800 u32ErrCode = 0;
5801
5802 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
5803 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
5804 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */,
5805 u32ErrCode, pVCpu->cpum.GstCtx.cr2);
5806
5807 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5808 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
5809 Assert(rcStrict == VINF_SUCCESS);
5810 break;
5811 }
5812
5813 case IEMXCPTRAISE_REEXEC_INSTR:
5814 Assert(rcStrict == VINF_SUCCESS);
5815 break;
5816
5817 case IEMXCPTRAISE_DOUBLE_FAULT:
5818 {
5819 /*
5820             * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
5821 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
5822 */
5823 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
5824 {
5825 pVmxTransient->fVectoringDoublePF = true;
5826 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5827 pVCpu->cpum.GstCtx.cr2));
5828 rcStrict = VINF_SUCCESS;
5829 }
5830 else
5831 {
5832 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
5833 vmxHCSetPendingXcptDF(pVCpu);
5834 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5835 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5836 rcStrict = VINF_HM_DOUBLE_FAULT;
5837 }
5838 break;
5839 }
5840
5841 case IEMXCPTRAISE_TRIPLE_FAULT:
5842 {
5843 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
5844 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5845 rcStrict = VINF_EM_RESET;
5846 break;
5847 }
5848
5849 case IEMXCPTRAISE_CPU_HANG:
5850 {
5851 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
5852 rcStrict = VERR_EM_GUEST_CPU_HANG;
5853 break;
5854 }
5855
5856 default:
5857 {
5858 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
5859 rcStrict = VERR_VMX_IPE_2;
5860 break;
5861 }
5862 }
5863 }
5864 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5865 && !CPUMIsGuestNmiBlocking(pVCpu))
5866 {
5867 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
5868 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
5869 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
5870 {
5871 /*
5872         * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
5873 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5874 * that virtual NMIs remain blocked until the IRET execution is completed.
5875 *
5876 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
5877 */
5878 CPUMSetGuestNmiBlocking(pVCpu, true);
5879 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5880 }
5881 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5882 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5883 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5884 {
5885 /*
5886 * Execution of IRET caused an EPT violation, page-modification log-full event or
5887 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
5888 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5889 * that virtual NMIs remain blocked until the IRET execution is completed.
5890 *
5891 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
5892 */
5893 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
5894 {
5895 CPUMSetGuestNmiBlocking(pVCpu, true);
5896 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5897 }
5898 }
5899 }
5900
5901 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
5902 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
5903 return rcStrict;
5904}
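/*
 * Illustrative sketch (not built): a typical consumer of vmxHCCheckExitDueToEventDelivery()
 * dispatches on the strict status codes documented above, roughly as follows (see vmxHCExitXcpt()
 * further below for a real instance):
 *
 *     VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         // Handle the VM-exit; any event made pending above is re-injected on the next VM-entry.
 *     }
 *     else if (rcStrict == VINF_HM_DOUBLE_FAULT)
 *         rcStrict = VINF_SUCCESS;    // A #DF is now pending, resume the guest to deliver it.
 *     else
 *         return rcStrict;            // VINF_EM_RESET or VERR_EM_GUEST_CPU_HANG.
 */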
5905
5906
5907#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5908/**
5909 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
5910 * guest attempting to execute a VMX instruction.
5911 *
5912 * @returns Strict VBox status code (i.e. informational status codes too).
5913 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5914 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
5915 *
5916 * @param pVCpu The cross context virtual CPU structure.
5917 * @param uExitReason The VM-exit reason.
5918 *
5919 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
5920 * @remarks No-long-jump zone!!!
5921 */
5922static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
5923{
5924 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
5925 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
5926
5927 /*
5928 * The physical CPU would have already checked the CPU mode/code segment.
5929 * We shall just assert here for paranoia.
5930 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
5931 */
5932 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
5933 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
5934 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
5935
5936 if (uExitReason == VMX_EXIT_VMXON)
5937 {
5938 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5939
5940 /*
5941 * We check CR4.VMXE because it is required to be always set while in VMX operation
5942 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
5943 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
5944 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
5945 */
5946 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
5947 {
5948 Log4Func(("CR4.VMXE is not set -> #UD\n"));
5949 vmxHCSetPendingXcptUD(pVCpu);
5950 return VINF_HM_PENDING_XCPT;
5951 }
5952 }
5953 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
5954 {
5955 /*
5956 * The guest has not entered VMX operation but attempted to execute a VMX instruction
5957 * (other than VMXON), we need to raise a #UD.
5958 */
5959 Log4Func(("Not in VMX root mode -> #UD\n"));
5960 vmxHCSetPendingXcptUD(pVCpu);
5961 return VINF_HM_PENDING_XCPT;
5962 }
5963
5964 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
5965 return VINF_SUCCESS;
5966}
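/*
 * Illustrative sketch (not built): the nested-VMX instruction exit handlers are expected to run
 * this check before handing the instruction to IEM, roughly:
 *
 *     VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToVmxInstr(pVCpu, pVmxTransient->uExitReason);
 *     if (rcStrict == VINF_HM_PENDING_XCPT)
 *         return VINF_SUCCESS;    // #UD is now pending, resume the guest to deliver it.
 *     // ...then decode any operands and let IEM emulate the instruction...
 *
 * The convenience macro defined earlier in this section wraps this pattern.
 */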
5967
5968
5969/**
5970 * Decodes the memory operand of an instruction that caused a VM-exit.
5971 *
5972 * The Exit qualification field provides the displacement field for memory
5973 * operand instructions, if any.
5974 *
5975 * @returns Strict VBox status code (i.e. informational status codes too).
5976 * @retval VINF_SUCCESS if the operand was successfully decoded.
5977 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
5978 * operand.
5979 * @param pVCpu The cross context virtual CPU structure.
5980 * @param uExitInstrInfo The VM-exit instruction information field.
5981 * @param enmMemAccess The memory operand's access type (read or write).
5982 * @param GCPtrDisp The instruction displacement field, if any. For
5983 * RIP-relative addressing pass RIP + displacement here.
5984 * @param pGCPtrMem Where to store the effective destination memory address.
5985 *
5986 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
5987 * virtual-8086 mode hence skips those checks while verifying if the
5988 * segment is valid.
5989 */
5990static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
5991 PRTGCPTR pGCPtrMem)
5992{
5993 Assert(pGCPtrMem);
5994 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
5995 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
5996 | CPUMCTX_EXTRN_CR0);
5997
5998 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
5999 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6000 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6001
6002 VMXEXITINSTRINFO ExitInstrInfo;
6003 ExitInstrInfo.u = uExitInstrInfo;
6004 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6005 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6006 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6007 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6008 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6009 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6010 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6011 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6012 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6013
6014 /*
6015 * Validate instruction information.
6016     * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6017 */
6018 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6019 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6020 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6021 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6022 AssertLogRelMsgReturn(fIsMemOperand,
6023 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6024
6025 /*
6026 * Compute the complete effective address.
6027 *
6028 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6029 * See AMD spec. 4.5.2 "Segment Registers".
6030 */
6031 RTGCPTR GCPtrMem = GCPtrDisp;
6032 if (fBaseRegValid)
6033 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6034 if (fIdxRegValid)
6035 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6036
6037 RTGCPTR const GCPtrOff = GCPtrMem;
6038 if ( !fIsLongMode
6039 || iSegReg >= X86_SREG_FS)
6040 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6041 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
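   /*
    * Worked example (illustrative operand, not taken from any real guest): for a memory operand
    * like ds:[rbx + rsi*4 + 0x10] with a 64-bit address size, GCPtrDisp carries the 0x10
    * displacement, so GCPtrMem = 0x10 + rbx + (rsi << 2). The DS base is not added in long mode
    * (only FS/GS bases apply there), and s_auAddrSizeMasks[2] is all ones, leaving the address
    * unchanged.
    */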
6042
6043 /*
6044 * Validate effective address.
6045 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6046 */
6047 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6048 Assert(cbAccess > 0);
6049 if (fIsLongMode)
6050 {
6051 if (X86_IS_CANONICAL(GCPtrMem))
6052 {
6053 *pGCPtrMem = GCPtrMem;
6054 return VINF_SUCCESS;
6055 }
6056
6057 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6058 * "Data Limit Checks in 64-bit Mode". */
6059 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6060 vmxHCSetPendingXcptGP(pVCpu, 0);
6061 return VINF_HM_PENDING_XCPT;
6062 }
6063
6064 /*
6065 * This is a watered down version of iemMemApplySegment().
6066 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6067 * and segment CPL/DPL checks are skipped.
6068 */
6069 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6070 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6071 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6072
6073 /* Check if the segment is present and usable. */
6074 if ( pSel->Attr.n.u1Present
6075 && !pSel->Attr.n.u1Unusable)
6076 {
6077 Assert(pSel->Attr.n.u1DescType);
6078 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6079 {
6080 /* Check permissions for the data segment. */
6081 if ( enmMemAccess == VMXMEMACCESS_WRITE
6082 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6083 {
6084 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6085 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6086 return VINF_HM_PENDING_XCPT;
6087 }
6088
6089 /* Check limits if it's a normal data segment. */
6090 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6091 {
6092 if ( GCPtrFirst32 > pSel->u32Limit
6093 || GCPtrLast32 > pSel->u32Limit)
6094 {
6095 Log4Func(("Data segment limit exceeded. "
6096 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6097 GCPtrLast32, pSel->u32Limit));
6098 if (iSegReg == X86_SREG_SS)
6099 vmxHCSetPendingXcptSS(pVCpu, 0);
6100 else
6101 vmxHCSetPendingXcptGP(pVCpu, 0);
6102 return VINF_HM_PENDING_XCPT;
6103 }
6104 }
6105 else
6106 {
6107 /* Check limits if it's an expand-down data segment.
6108 Note! The upper boundary is defined by the B bit, not the G bit! */
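            /* Illustrative example: with u32Limit=0x0fff and u1DefBig=1, offsets 0x1000 through
               0xffffffff are valid, while anything at or below 0x0fff faults. */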
6109 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6110 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6111 {
6112 Log4Func(("Expand-down data segment limit exceeded. "
6113 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6114 GCPtrLast32, pSel->u32Limit));
6115 if (iSegReg == X86_SREG_SS)
6116 vmxHCSetPendingXcptSS(pVCpu, 0);
6117 else
6118 vmxHCSetPendingXcptGP(pVCpu, 0);
6119 return VINF_HM_PENDING_XCPT;
6120 }
6121 }
6122 }
6123 else
6124 {
6125 /* Check permissions for the code segment. */
6126 if ( enmMemAccess == VMXMEMACCESS_WRITE
6127 || ( enmMemAccess == VMXMEMACCESS_READ
6128 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6129 {
6130 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6131 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6132 vmxHCSetPendingXcptGP(pVCpu, 0);
6133 return VINF_HM_PENDING_XCPT;
6134 }
6135
6136 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6137 if ( GCPtrFirst32 > pSel->u32Limit
6138 || GCPtrLast32 > pSel->u32Limit)
6139 {
6140 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6141 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6142 if (iSegReg == X86_SREG_SS)
6143 vmxHCSetPendingXcptSS(pVCpu, 0);
6144 else
6145 vmxHCSetPendingXcptGP(pVCpu, 0);
6146 return VINF_HM_PENDING_XCPT;
6147 }
6148 }
6149 }
6150 else
6151 {
6152 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6153 vmxHCSetPendingXcptGP(pVCpu, 0);
6154 return VINF_HM_PENDING_XCPT;
6155 }
6156
6157 *pGCPtrMem = GCPtrMem;
6158 return VINF_SUCCESS;
6159}
6160#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6161
6162
6163/**
6164 * VM-exit helper for LMSW.
6165 */
6166static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6167{
6168 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6169 AssertRCReturn(rc, rc);
6170
6171 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6172 AssertMsg( rcStrict == VINF_SUCCESS
6173 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6174
6175 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6176 if (rcStrict == VINF_IEM_RAISED_XCPT)
6177 {
6178 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6179 rcStrict = VINF_SUCCESS;
6180 }
6181
6182 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6183 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6184 return rcStrict;
6185}
6186
6187
6188/**
6189 * VM-exit helper for CLTS.
6190 */
6191static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6192{
6193 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6194 AssertRCReturn(rc, rc);
6195
6196 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6197 AssertMsg( rcStrict == VINF_SUCCESS
6198 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6199
6200 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6201 if (rcStrict == VINF_IEM_RAISED_XCPT)
6202 {
6203 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6204 rcStrict = VINF_SUCCESS;
6205 }
6206
6207 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6208 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6209 return rcStrict;
6210}
6211
6212
6213/**
6214 * VM-exit helper for MOV from CRx (CRx read).
6215 */
6216static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6217{
6218 Assert(iCrReg < 16);
6219 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6220
6221 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6222 AssertRCReturn(rc, rc);
6223
6224 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6225 AssertMsg( rcStrict == VINF_SUCCESS
6226 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6227
6228 if (iGReg == X86_GREG_xSP)
6229 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6230 else
6231 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6232#ifdef VBOX_WITH_STATISTICS
6233 switch (iCrReg)
6234 {
6235 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6236 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6237 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6238 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6239 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6240 }
6241#endif
6242 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6243 return rcStrict;
6244}
6245
6246
6247/**
6248 * VM-exit helper for MOV to CRx (CRx write).
6249 */
6250static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6251{
6252 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6253
6254 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6255 AssertMsg( rcStrict == VINF_SUCCESS
6256 || rcStrict == VINF_IEM_RAISED_XCPT
6257 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6258
6259 switch (iCrReg)
6260 {
6261 case 0:
6262 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6263 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6264 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6265 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6266 break;
6267
6268 case 2:
6269 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6270            /* Nothing to do here, CR2 is not part of the VMCS. */
6271 break;
6272
6273 case 3:
6274 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6275 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6276 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6277 break;
6278
6279 case 4:
6280 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6281 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6282#ifndef IN_NEM_DARWIN
6283 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6284 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6285#else
6286 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6287#endif
6288 break;
6289
6290 case 8:
6291 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6292 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6293 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6294 break;
6295
6296 default:
6297 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6298 break;
6299 }
6300
6301 if (rcStrict == VINF_IEM_RAISED_XCPT)
6302 {
6303 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6304 rcStrict = VINF_SUCCESS;
6305 }
6306 return rcStrict;
6307}
6308
6309
6310/**
6311 * VM-exit exception handler for \#PF (Page-fault exception).
6312 *
6313 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6314 */
6315static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6316{
6317 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6318 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6319
6320#ifndef IN_NEM_DARWIN
6321 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6322 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6323 { /* likely */ }
6324 else
6325#endif
6326 {
6327#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6328 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6329#endif
6330 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6331 if (!pVmxTransient->fVectoringDoublePF)
6332 {
6333 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6334 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6335 }
6336 else
6337 {
6338 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6339 Assert(!pVmxTransient->fIsNestedGuest);
6340 vmxHCSetPendingXcptDF(pVCpu);
6341 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6342 }
6343 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6344 return VINF_SUCCESS;
6345 }
6346
6347 Assert(!pVmxTransient->fIsNestedGuest);
6348
6349    /* If it's a vectoring #PF, emulate injecting the original event, as PGMTrap0eHandler() is incapable
6350       of differentiating between instruction emulation and event injection that caused the #PF. See @bugref{6607}. */
6351 if (pVmxTransient->fVectoringPF)
6352 {
6353 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6354 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6355 }
6356
6357 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6358 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6359 AssertRCReturn(rc, rc);
6360
6361 Log4Func(("#PF: cs:rip=%#04x:%#RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pCtx->cs.Sel, pCtx->rip,
6362 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pCtx->cr3));
6363
6364 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6365 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
6366
6367 Log4Func(("#PF: rc=%Rrc\n", rc));
6368 if (rc == VINF_SUCCESS)
6369 {
6370 /*
6371         * This is typically a shadow page table sync or an MMIO instruction. But we may have
6372 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6373 */
6374 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6375 TRPMResetTrap(pVCpu);
6376 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6377 return rc;
6378 }
6379
6380 if (rc == VINF_EM_RAW_GUEST_TRAP)
6381 {
6382 if (!pVmxTransient->fVectoringDoublePF)
6383 {
6384 /* It's a guest page fault and needs to be reflected to the guest. */
6385 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6386 TRPMResetTrap(pVCpu);
6387 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6388 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6389 uGstErrorCode, pVmxTransient->uExitQual);
6390 }
6391 else
6392 {
6393 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6394 TRPMResetTrap(pVCpu);
6395 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6396 vmxHCSetPendingXcptDF(pVCpu);
6397 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6398 }
6399
6400 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6401 return VINF_SUCCESS;
6402 }
6403
6404 TRPMResetTrap(pVCpu);
6405 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6406 return rc;
6407}
6408
6409
6410/**
6411 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6412 *
6413 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6414 */
6415static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6416{
6417 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6418 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6419
6420 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0);
6421 AssertRCReturn(rc, rc);
6422
6423 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6424 {
6425 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
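        /* (With CR0.NE clear, the CPU uses legacy FP error reporting: the FPU asserts FERR#,
           which the chipset routes to IRQ 13, hence the PDMIsaSetIrq() call below.) */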
6426 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6427
6428 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6429 * provides VM-exit instruction length. If this causes problem later,
6430 * disassemble the instruction like it's done on AMD-V. */
6431 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6432 AssertRCReturn(rc2, rc2);
6433 return rc;
6434 }
6435
6436 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6437 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6438 return VINF_SUCCESS;
6439}
6440
6441
6442/**
6443 * VM-exit exception handler for \#BP (Breakpoint exception).
6444 *
6445 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6446 */
6447static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6448{
6449 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6450 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6451
6452 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6453 AssertRCReturn(rc, rc);
6454
6455 VBOXSTRICTRC rcStrict;
6456 if (!pVmxTransient->fIsNestedGuest)
6457 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx));
6458 else
6459 rcStrict = VINF_EM_RAW_GUEST_TRAP;
6460
6461 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6462 {
6463 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6464 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6465 rcStrict = VINF_SUCCESS;
6466 }
6467
6468 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
6469 return rcStrict;
6470}
6471
6472
6473/**
6474 * VM-exit exception handler for \#AC (Alignment-check exception).
6475 *
6476 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6477 */
6478static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6479{
6480 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6481
6482 /*
6483 * Detect #ACs caused by host having enabled split-lock detection.
6484 * Emulate such instructions.
6485 */
6486 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo,
6487 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);
6488 AssertRCReturn(rc, rc);
6489 /** @todo detect split lock in cpu feature? */
6490 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
6491 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6492 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
6493 || CPUMGetGuestCPL(pVCpu) != 3
6494 /* 3. When the EFLAGS.AC != 0 this can only be a split-lock case. */
6495 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
6496 {
6497 /*
6498 * Check for debug/trace events and import state accordingly.
6499 */
6500 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
6501 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6502 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
6503#ifndef IN_NEM_DARWIN
6504 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
6505#endif
6506 )
6507 {
6508 if (pVM->cCpus == 1)
6509 {
6510#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6511 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6512#else
6513 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6514#endif
6515 AssertRCReturn(rc, rc);
6516 }
6517 }
6518 else
6519 {
6520 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6521 AssertRCReturn(rc, rc);
6522
6523 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
6524
6525 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
6526 {
6527 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
6528 if (rcStrict != VINF_SUCCESS)
6529 return rcStrict;
6530 }
6531 }
6532
6533 /*
6534 * Emulate the instruction.
6535 *
6536 * We have to ignore the LOCK prefix here as we must not retrigger the
6537 * detection on the host. This isn't all that satisfactory, though...
6538 */
6539 if (pVM->cCpus == 1)
6540 {
6541 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
6542 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6543
6544 /** @todo For SMP configs we should do a rendezvous here. */
6545 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
6546 if (rcStrict == VINF_SUCCESS)
6547#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6548 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6549 HM_CHANGED_GUEST_RIP
6550 | HM_CHANGED_GUEST_RFLAGS
6551 | HM_CHANGED_GUEST_GPRS_MASK
6552 | HM_CHANGED_GUEST_CS
6553 | HM_CHANGED_GUEST_SS);
6554#else
6555 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6556#endif
6557 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6558 {
6559 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6560 rcStrict = VINF_SUCCESS;
6561 }
6562 return rcStrict;
6563 }
6564 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
6565 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6566 return VINF_EM_EMULATE_SPLIT_LOCK;
6567 }
6568
6569 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
6570 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
6571 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
6572
6573 /* Re-inject it. We'll detect any nesting before getting here. */
6574 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6575 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6576 return VINF_SUCCESS;
6577}
6578
6579
6580/**
6581 * VM-exit exception handler for \#DB (Debug exception).
6582 *
6583 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6584 */
6585static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6586{
6587 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6588 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
6589
6590 /*
6591     * Get the DR6-like values from the Exit qualification and pass them to DBGF for processing.
6592 */
6593 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6594
6595 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
6596 uint64_t const uDR6 = X86_DR6_INIT_VAL
6597 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
6598 | X86_DR6_BD | X86_DR6_BS));
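   /* For instance (illustrative), a hit on breakpoint 1 alone sets B1 in the Exit qualification,
      yielding uDR6 = X86_DR6_INIT_VAL | X86_DR6_B1 here. */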
6599
6600 int rc;
6601 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6602 if (!pVmxTransient->fIsNestedGuest)
6603 {
6604 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6605
6606 /*
6607 * Prevents stepping twice over the same instruction when the guest is stepping using
6608 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
6609 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
6610 */
6611 if ( rc == VINF_EM_DBG_STEPPED
6612 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
6613 {
6614 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6615 rc = VINF_EM_RAW_GUEST_TRAP;
6616 }
6617 }
6618 else
6619 rc = VINF_EM_RAW_GUEST_TRAP;
6620 Log6Func(("rc=%Rrc\n", rc));
6621 if (rc == VINF_EM_RAW_GUEST_TRAP)
6622 {
6623 /*
6624 * The exception was for the guest. Update DR6, DR7.GD and
6625 * IA32_DEBUGCTL.LBR before forwarding it.
6626 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
6627 */
6628#ifndef IN_NEM_DARWIN
6629 VMMRZCallRing3Disable(pVCpu);
6630 HM_DISABLE_PREEMPT(pVCpu);
6631
6632 pCtx->dr[6] &= ~X86_DR6_B_MASK;
6633 pCtx->dr[6] |= uDR6;
6634 if (CPUMIsGuestDebugStateActive(pVCpu))
6635 ASMSetDR6(pCtx->dr[6]);
6636
6637 HM_RESTORE_PREEMPT();
6638 VMMRZCallRing3Enable(pVCpu);
6639#else
6640 /** @todo */
6641#endif
6642
6643 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7);
6644 AssertRCReturn(rc, rc);
6645
6646 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
6647 pCtx->dr[7] &= ~(uint64_t)X86_DR7_GD;
6648
6649 /* Paranoia. */
6650 pCtx->dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
6651 pCtx->dr[7] |= X86_DR7_RA1_MASK;
6652
6653 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
6654 AssertRC(rc);
6655
6656 /*
6657 * Raise #DB in the guest.
6658 *
6659 * It is important to reflect exactly what the VM-exit gave us (preserving the
6660 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
6661 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
6662 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
6663 *
6664         * Intel re-documented ICEBP/INT1 in May 2018; it was previously only documented as part of
6665         * the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
6666 */
6667 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6668 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6669 return VINF_SUCCESS;
6670 }
6671
6672 /*
6673 * Not a guest trap, must be a hypervisor related debug event then.
6674 * Update DR6 in case someone is interested in it.
6675 */
6676 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
6677 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
6678 CPUMSetHyperDR6(pVCpu, uDR6);
6679
6680 return rc;
6681}
6682
6683
6684/**
6685 * Hacks its way around the lovely mesa driver's backdoor accesses.
6686 *
6687 * @sa hmR0SvmHandleMesaDrvGp.
6688 */
6689static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6690{
6691 LogFunc(("cs:rip=%#04x:%#RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
6692 RT_NOREF(pCtx);
6693
6694 /* For now we'll just skip the instruction. */
6695 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6696}
6697
6698
6699/**
6700 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
6701 * backdoor logging w/o checking what it is running inside.
6702 *
6703 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
6704 * backdoor port and magic numbers loaded in registers.
6705 *
6706 * @returns true if it is, false if it isn't.
6707 * @sa hmR0SvmIsMesaDrvGp.
6708 */
6709DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6710{
6711 /* 0xed: IN eAX,dx */
6712 uint8_t abInstr[1];
6713 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
6714 return false;
6715
6716 /* Check that it is #GP(0). */
6717 if (pVmxTransient->uExitIntErrorCode != 0)
6718 return false;
6719
6720 /* Check magic and port. */
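   /* (Note: these are the VMware backdoor constants: 'VMXh' (0x564d5868) as the magic in EAX and
      'VX' (0x5658) as the I/O port in DX, which the Mesa driver uses for its logging.) */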
6721 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
6722 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
6723 if (pCtx->rax != UINT32_C(0x564d5868))
6724 return false;
6725 if (pCtx->dx != UINT32_C(0x5658))
6726 return false;
6727
6728 /* Flat ring-3 CS. */
6729 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
6730 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
6731 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
6732 if (pCtx->cs.Attr.n.u2Dpl != 3)
6733 return false;
6734 if (pCtx->cs.u64Base != 0)
6735 return false;
6736
6737 /* Check opcode. */
6738 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
6739 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
6740 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
6741 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
6742 if (RT_FAILURE(rc))
6743 return false;
6744 if (abInstr[0] != 0xed)
6745 return false;
6746
6747 return true;
6748}
6749
6750
6751/**
6752 * VM-exit exception handler for \#GP (General-protection exception).
6753 *
6754 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6755 */
6756static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6757{
6758 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6759 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
6760
6761 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6762 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6763#ifndef IN_NEM_DARWIN
6764 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
6765 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
6766 { /* likely */ }
6767 else
6768#endif
6769 {
6770#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6771# ifndef IN_NEM_DARWIN
6772 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6773# else
6774 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6775# endif
6776#endif
6777 /*
6778 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
6779 * executing a nested-guest, reflect #GP to the guest or nested-guest.
6780 */
6781 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6782 AssertRCReturn(rc, rc);
6783 Log4Func(("Gst: cs:rip=%#04x:%#RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
6784 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
6785
6786 if ( pVmxTransient->fIsNestedGuest
6787 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
6788 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
6789 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6790 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6791 else
6792 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
6793 return rc;
6794 }
6795
6796#ifndef IN_NEM_DARWIN
6797 Assert(CPUMIsGuestInRealModeEx(pCtx));
6798 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
6799 Assert(!pVmxTransient->fIsNestedGuest);
6800
6801 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6802 AssertRCReturn(rc, rc);
6803
6804 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
6805 if (rcStrict == VINF_SUCCESS)
6806 {
6807 if (!CPUMIsGuestInRealModeEx(pCtx))
6808 {
6809 /*
6810 * The guest is no longer in real-mode, check if we can continue executing the
6811 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
6812 */
6813 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
6814 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
6815 {
6816 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
6817 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6818 }
6819 else
6820 {
6821 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
6822 rcStrict = VINF_EM_RESCHEDULE;
6823 }
6824 }
6825 else
6826 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6827 }
6828 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6829 {
6830 rcStrict = VINF_SUCCESS;
6831 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6832 }
6833 return VBOXSTRICTRC_VAL(rcStrict);
6834#endif
6835}
6836
6837
6838/**
6839 * VM-exit exception handler for \#DE (Divide Error).
6840 *
6841 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6842 */
6843static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6844{
6845 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6846 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
6847
6848 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6849 AssertRCReturn(rc, rc);
6850
6851 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
6852 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
6853 {
6854 uint8_t cbInstr = 0;
6855 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
6856 if (rc2 == VINF_SUCCESS)
6857 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
6858 else if (rc2 == VINF_EM_RAW_GUEST_TRAP)
6859 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
6860 else
6861 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
6862 }
6863 else
6864 rcStrict = VINF_SUCCESS; /* Do nothing. */
6865
6866 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
6867    if (RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)))
6868 {
6869 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6870 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6871 rcStrict = VINF_SUCCESS;
6872 }
6873
6874 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
6875 return VBOXSTRICTRC_VAL(rcStrict);
6876}
6877
6878
6879/**
6880 * VM-exit exception handler wrapper for all other exceptions that are not handled
6881 * by a specific handler.
6882 *
6883 * This simply re-injects the exception back into the VM without any special
6884 * processing.
6885 *
6886 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6887 */
6888static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6889{
6890 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6891
6892#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6893# ifndef IN_NEM_DARWIN
6894 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6895 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
6896 ("uVector=%#x u32XcptBitmap=%#X32\n",
6897 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
6898 NOREF(pVmcsInfo);
6899# endif
6900#endif
6901
6902 /*
6903 * Re-inject the exception into the guest. This cannot be a double-fault condition which
6904 * would have been handled while checking exits due to event delivery.
6905 */
6906 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6907
6908#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6909 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6910 AssertRCReturn(rc, rc);
6911 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6912#endif
6913
6914#ifdef VBOX_WITH_STATISTICS
6915 switch (uVector)
6916 {
6917 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
6918 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
6919 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
6920 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6921 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
6922 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
6923 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6924 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
6925 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
6926 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
6927 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
6928 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
6929 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
6930 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
6931 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
6932 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
6933 default:
6934 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
6935 break;
6936 }
6937#endif
6938
6939 /* We should never call this function for a page-fault, we'd need to pass on the fault address below otherwise. */
6940 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
6941 NOREF(uVector);
6942
6943 /* Re-inject the original exception into the guest. */
6944 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6945 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6946 return VINF_SUCCESS;
6947}
6948
6949
6950/**
6951 * VM-exit exception handler for all exceptions (except NMIs!).
6952 *
6953 * @remarks This may be called for both guests and nested-guests. Take care to not
6954 * make assumptions and avoid doing anything that is not relevant when
6955 * executing a nested-guest (e.g., Mesa driver hacks).
6956 */
6957static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6958{
6959 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6960
6961 /*
6962 * If this VM-exit occurred while delivering an event through the guest IDT, take
6963 * action based on the return code and additional hints (e.g. for page-faults)
6964 * that will be updated in the VMX transient structure.
6965 */
6966 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
6967 if (rcStrict == VINF_SUCCESS)
6968 {
6969 /*
6970 * If an exception caused a VM-exit due to delivery of an event, the original
6971 * event may have to be re-injected into the guest. We shall reinject it and
6972 * continue guest execution. However, page-fault is a complicated case and
6973 * needs additional processing done in vmxHCExitXcptPF().
6974 */
6975 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
6976 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6977 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
6978 || uVector == X86_XCPT_PF)
6979 {
6980 switch (uVector)
6981 {
6982 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
6983 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
6984 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
6985 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
6986 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
6987 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
6988 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
6989 default:
6990 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
6991 }
6992 }
6993 /* else: inject pending event before resuming guest execution. */
6994 }
6995 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
6996 {
6997 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6998 rcStrict = VINF_SUCCESS;
6999 }
7000
7001 return rcStrict;
7002}
7003/** @} */
7004
7005
7006/** @name VM-exit handlers.
7007 * @{
7008 */
7009/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7010/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7011/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7012
7013/**
7014 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7015 */
7016HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7017{
7018 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7019 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7020
7021#ifndef IN_NEM_DARWIN
7022 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7023 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7024 return VINF_SUCCESS;
7025 return VINF_EM_RAW_INTERRUPT;
7026#else
7027 return VINF_SUCCESS;
7028#endif
7029}
7030
7031
7032/**
7033 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7034 * VM-exit.
7035 */
7036HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7037{
7038 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7039 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7040
7041 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
7042
7043 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7044 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7045 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7046
7047 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7048 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7049 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7050 NOREF(pVmcsInfo);
7051
7052 VBOXSTRICTRC rcStrict;
7053 switch (uExitIntType)
7054 {
7055#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7056 /*
7057 * Host physical NMIs:
7058 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7059 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7060 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7061 *
7062 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7063 * See Intel spec. 27.5.5 "Updating Non-Register State".
7064 */
7065 case VMX_EXIT_INT_INFO_TYPE_NMI:
7066 {
7067 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7068 break;
7069 }
7070#endif
7071
7072 /*
7073 * Privileged software exceptions (#DB from ICEBP),
7074 * Software exceptions (#BP and #OF),
7075 * Hardware exceptions:
7076 * Process the required exceptions and resume guest execution if possible.
7077 */
7078 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7079 Assert(uVector == X86_XCPT_DB);
7080 RT_FALL_THRU();
7081 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7082 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7083 RT_FALL_THRU();
7084 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7085 {
7086 NOREF(uVector);
7087 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
7088 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7089 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
7090 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
7091
7092 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7093 break;
7094 }
7095
7096 default:
7097 {
7098 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7099 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7100 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7101 break;
7102 }
7103 }
7104
7105 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7106 return rcStrict;
7107}
7108
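#if 0
/* Purely illustrative sketch, not built: decoding the VM-exit interruption-information field with
   plain masks instead of the VMX_EXIT_INT_INFO_XXX accessors used above. The assumed layout
   (vector in bits 7:0, type in bits 10:8, error-code valid in bit 11, valid in bit 31) follows
   Intel spec. 24.9.2 "Information for VM Exits Due to Vectored Events"; the function name is made
   up for this sketch. */
static void vmxHCSketchDecodeExitIntInfo(uint32_t uExitIntInfo)
{
    uint8_t const uVector     = (uint8_t)(uExitIntInfo & 0xff);         /* Bits 7:0. */
    uint8_t const uType       = (uint8_t)((uExitIntInfo >> 8) & 0x7);   /* Bits 10:8. */
    bool const    fErrCdValid = RT_BOOL(uExitIntInfo & RT_BIT_32(11));  /* Bit 11. */
    bool const    fValid      = RT_BOOL(uExitIntInfo & RT_BIT_32(31));  /* Bit 31. */
    NOREF(uVector); NOREF(uType); NOREF(fErrCdValid); NOREF(fValid);
}
#endif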
7109
7110/**
7111 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7112 */
7113HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7114{
7115 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7116
7117    /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7118 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7119 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7120
7121 /* Evaluate and deliver pending events and resume guest execution. */
7122 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7123 return VINF_SUCCESS;
7124}
7125
7126
7127/**
7128 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7129 */
7130HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7131{
7132 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7133
7134 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7135 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7136 {
7137 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7138 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7139 }
7140
7141 Assert(!CPUMIsGuestNmiBlocking(pVCpu));
7142
7143 /*
7144 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7145 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7146 */
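    /*
     * For reference, the guest interruptibility-state field uses bit 0 for blocking by STI,
     * bit 1 for blocking by MOV SS, bit 2 for blocking by SMI and bit 3 for blocking by NMI;
     * see Intel spec. 24.4.2 "Guest Non-Register State".
     */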
7147 uint32_t fIntrState;
7148 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7149 AssertRC(rc);
7150 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7151 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7152 {
7153 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
7154 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
7155
7156 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7157 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7158 AssertRC(rc);
7159 }
7160
7161    /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7162 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7163
7164 /* Evaluate and deliver pending events and resume guest execution. */
7165 return VINF_SUCCESS;
7166}
7167
7168
7169/**
7170 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7171 */
7172HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7173{
7174 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7175 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7176}
7177
7178
7179/**
7180 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7181 */
7182HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7183{
7184 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7185 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7186}
7187
7188
7189/**
7190 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7191 */
7192HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7193{
7194 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7195
7196 /*
7197 * Get the state we need and update the exit history entry.
7198 */
7199 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7200 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7201
7202 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7203 AssertRCReturn(rc, rc);
7204
7205 VBOXSTRICTRC rcStrict;
7206 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7207 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7208 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
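    /* A NULL exit record is the common case; a non-NULL record means this RIP is exiting
       frequently enough that the EMHistoryExec probing path below is expected to pay off. */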
7209 if (!pExitRec)
7210 {
7211 /*
7212 * Regular CPUID instruction execution.
7213 */
7214 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7215 if (rcStrict == VINF_SUCCESS)
7216 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7217 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7218 {
7219 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7220 rcStrict = VINF_SUCCESS;
7221 }
7222 }
7223 else
7224 {
7225 /*
7226 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7227 */
7228 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7229 AssertRCReturn(rc2, rc2);
7230
7231 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7232 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7233
7234 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7235 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7236
7237 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7238 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7239 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7240 }
7241 return rcStrict;
7242}
7243
7244
7245/**
7246 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7247 */
7248HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7249{
7250 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7251
7252 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7253 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4);
7254 AssertRCReturn(rc, rc);
7255
7256 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7257 return VINF_EM_RAW_EMULATE_INSTR;
7258
7259 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7260 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7261}
7262
7263
7264/**
7265 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7266 */
7267HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7268{
7269 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7270
7271 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7272 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7273 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
7274 AssertRCReturn(rc, rc);
7275
7276 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7277 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7278 {
7279 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7280 we must reset offsetting on VM-entry. See @bugref{6634}. */
7281 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7282 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7283 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7284 }
7285 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7286 {
7287 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7288 rcStrict = VINF_SUCCESS;
7289 }
7290 return rcStrict;
7291}
7292
7293
7294/**
7295 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7296 */
7297HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7298{
7299 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7300
7301 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7302 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7303 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
7304 AssertRCReturn(rc, rc);
7305
7306 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7307 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7308 {
7309 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7310 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7311 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7312 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7313 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7314 }
7315 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7316 {
7317 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7318 rcStrict = VINF_SUCCESS;
7319 }
7320 return rcStrict;
7321}
7322
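#if 0
/* Purely illustrative sketch, not built: the relationship TSC offsetting establishes and the
   reason a stale offset must be cleared in the RDTSC/RDTSCP handlers above. With
   VMX_PROC_CTLS_USE_TSC_OFFSETTING active, a guest RDTSC/RDTSCP returns the host TSC plus the
   signed TSC-offset field of the VMCS. The helper name is made up for this sketch. */
static uint64_t vmxHCSketchGuestTscValue(uint64_t uHostTsc, int64_t iTscOffset)
{
    return uHostTsc + (uint64_t)iTscOffset;
}
#endif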
7323
7324/**
7325 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7326 */
7327HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7328{
7329 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7330
7331 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7332 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0
7333 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
7334 AssertRCReturn(rc, rc);
7335
7336 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7337 rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
7338 if (RT_LIKELY(rc == VINF_SUCCESS))
7339 {
7340 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7341 Assert(pVmxTransient->cbExitInstr == 2);
7342 }
7343 else
7344 {
7345 AssertMsgFailed(("vmxHCExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
7346 rc = VERR_EM_INTERPRETER;
7347 }
7348 return rc;
7349}
7350
7351
7352/**
7353 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7354 */
7355HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7356{
7357 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7358
7359 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7360 if (EMAreHypercallInstructionsEnabled(pVCpu))
7361 {
7362 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7363 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
7364 | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
7365 AssertRCReturn(rc, rc);
7366
7367 /* Perform the hypercall. */
7368 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7369 if (rcStrict == VINF_SUCCESS)
7370 {
7371 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7372 AssertRCReturn(rc, rc);
7373 }
7374 else
7375 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7376 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7377 || RT_FAILURE(rcStrict));
7378
7379 /* If the hypercall changes anything other than guest's general-purpose registers,
7380 we would need to reload the guest changed bits here before VM-entry. */
7381 }
7382 else
7383 Log4Func(("Hypercalls not enabled\n"));
7384
7385 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7386 if (RT_FAILURE(rcStrict))
7387 {
7388 vmxHCSetPendingXcptUD(pVCpu);
7389 rcStrict = VINF_SUCCESS;
7390 }
7391
7392 return rcStrict;
7393}
7394
7395
7396/**
7397 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7398 */
7399HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7400{
7401 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7402#ifndef IN_NEM_DARWIN
7403 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7404#endif
7405
7406 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7407 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7408 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7409 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7410 AssertRCReturn(rc, rc);
7411
7412 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7413
7414 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7415 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7416 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7417 {
7418 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7419 rcStrict = VINF_SUCCESS;
7420 }
7421 else
7422 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7423 VBOXSTRICTRC_VAL(rcStrict)));
7424 return rcStrict;
7425}
7426
7427
7428/**
7429 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7430 */
7431HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7432{
7433 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7434
7435 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7436 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7437 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
7438 AssertRCReturn(rc, rc);
7439
7440 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7441 if (rcStrict == VINF_SUCCESS)
7442 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7443 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7444 {
7445 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7446 rcStrict = VINF_SUCCESS;
7447 }
7448
7449 return rcStrict;
7450}
7451
7452
7453/**
7454 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7455 */
7456HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7457{
7458 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7459
7460 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7461 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7462 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7463 AssertRCReturn(rc, rc);
7464
7465 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
7466 if (RT_SUCCESS(rcStrict))
7467 {
7468 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7469 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7470 rcStrict = VINF_SUCCESS;
7471 }
7472
7473 return rcStrict;
7474}
7475
7476
7477/**
7478 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7479 * VM-exit.
7480 */
7481HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7482{
7483 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7484 return VINF_EM_RESET;
7485}
7486
7487
7488/**
7489 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7490 */
7491HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7492{
7493 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7494
7495 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7496 AssertRCReturn(rc, rc);
7497
7498 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
7499 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
7500 rc = VINF_SUCCESS;
7501 else
7502 rc = VINF_EM_HALT;
7503
7504 if (rc != VINF_SUCCESS)
7505 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
7506 return rc;
7507}
7508
7509
7510/**
7511 * VM-exit handler for instructions that result in a \#UD exception delivered to
7512 * the guest.
7513 */
7514HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7515{
7516 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7517 vmxHCSetPendingXcptUD(pVCpu);
7518 return VINF_SUCCESS;
7519}
7520
7521
7522/**
7523 * VM-exit handler for expiry of the VMX-preemption timer.
7524 */
7525HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7526{
7527 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7528
7529 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
7530 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7531    Log12(("vmxHCExitPreemptTimer:\n"));
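    /* For reference: the VMX-preemption timer counts down at a rate proportional to the TSC,
       namely the TSC frequency divided by 2^N where N is given by bits 4:0 of IA32_VMX_MISC
       (see Intel spec. A.6 "Miscellaneous Data"). */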
7532
7533 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
7534 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7535 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
7536 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
7537 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
7538}
7539
7540
7541/**
7542 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
7543 */
7544HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7545{
7546 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7547
7548 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7549 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7550 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
7551 AssertRCReturn(rc, rc);
7552
7553 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
7554 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
7555 : HM_CHANGED_RAISED_XCPT_MASK);
7556
7557#ifndef IN_NEM_DARWIN
7558 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7559 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
7560 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
7561 {
7562 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
7563 hmR0VmxUpdateStartVmFunction(pVCpu);
7564 }
7565#endif
7566
7567 return rcStrict;
7568}
7569
7570
7571/**
7572 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
7573 */
7574HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7575{
7576 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7577
7578    /** @todo Enable the new code after finding a reliable guest test-case. */
7579#if 1
7580 return VERR_EM_INTERPRETER;
7581#else
7582 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7583 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
7584 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7585 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
7586 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7587 AssertRCReturn(rc, rc);
7588
7589 /* Paranoia. Ensure this has a memory operand. */
7590 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
7591
7592 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
7593 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
7594 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
7595 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
7596
7597 RTGCPTR GCPtrDesc;
7598 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
7599
7600 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
7601 GCPtrDesc, uType);
7602 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7603 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7604 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7605 {
7606 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7607 rcStrict = VINF_SUCCESS;
7608 }
7609 return rcStrict;
7610#endif
7611}
7612
7613
7614/**
7615 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
7616 * VM-exit.
7617 */
7618HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7619{
7620 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7621 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7622 AssertRCReturn(rc, rc);
7623
7624 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
7625 if (RT_FAILURE(rc))
7626 return rc;
7627
7628 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
7629 NOREF(uInvalidReason);
7630
7631#ifdef VBOX_STRICT
7632 uint32_t fIntrState;
7633 uint64_t u64Val;
7634 vmxHCReadEntryIntInfoVmcs(pVCpu, pVmxTransient);
7635 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
7636 vmxHCReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
7637
7638 Log4(("uInvalidReason %u\n", uInvalidReason));
7639 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
7640 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
7641 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
7642
7643 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
7644 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
7645 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
7646 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
7647 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
7648 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
7649 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
7650    Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
7651 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
7652 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
7653 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
7654 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
7655# ifndef IN_NEM_DARWIN
7656 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
7657 {
7658 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
7659 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
7660 }
7661
7662 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
7663# endif
7664#endif
7665
7666 return VERR_VMX_INVALID_GUEST_STATE;
7667}
7668
7669/**
7670 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
7671 */
7672HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7673{
7674 /*
7675 * Cumulative notes of all recognized but unexpected VM-exits.
7676 *
7677 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
7678 * nested-paging is used.
7679 *
7680     *  2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
7681 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
7682 * this function (and thereby stop VM execution) for handling such instructions.
7683 *
7684 *
7685 * VMX_EXIT_INIT_SIGNAL:
7686 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
7687     *    They are -NOT- blocked in VMX non-root operation, so we can, in theory, still get these
7688     *    VM-exits. However, we should not receive INIT signal VM-exits while executing a VM.
7689 *
7690     *    See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
7691     *    See Intel spec. 29.3 "VMX Instructions" for "VMXON".
7692     *    See Intel spec. 23.8 "Restrictions on VMX Operation".
7693 *
7694 * VMX_EXIT_SIPI:
7695 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
7696 * activity state is used. We don't make use of it as our guests don't have direct
7697 * access to the host local APIC.
7698 *
7699 * See Intel spec. 25.3 "Other Causes of VM-exits".
7700 *
7701 * VMX_EXIT_IO_SMI:
7702 * VMX_EXIT_SMI:
7703 * This can only happen if we support dual-monitor treatment of SMI, which can be
7704 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
7705 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
7706 * VMX root mode or receive an SMI. If we get here, something funny is going on.
7707 *
7708 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
7709 * See Intel spec. 25.3 "Other Causes of VM-Exits"
7710 *
7711 * VMX_EXIT_ERR_MSR_LOAD:
7712     *    Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
7713     *    and typically indicate a bug in the hypervisor code. We thus cannot resume
7714 * execution.
7715 *
7716 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
7717 *
7718 * VMX_EXIT_ERR_MACHINE_CHECK:
7719     *    Machine-check exceptions indicate a fatal/unrecoverable hardware condition,
7720     *    including but not limited to system bus, ECC, parity, cache and TLB errors. An
7721     *    abort-class exception (#MC) is raised. We thus cannot assume a
7722 * reasonable chance of continuing any sort of execution and we bail.
7723 *
7724 * See Intel spec. 15.1 "Machine-check Architecture".
7725 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
7726 *
7727 * VMX_EXIT_PML_FULL:
7728 * VMX_EXIT_VIRTUALIZED_EOI:
7729 * VMX_EXIT_APIC_WRITE:
7730 * We do not currently support any of these features and thus they are all unexpected
7731 * VM-exits.
7732 *
7733 * VMX_EXIT_GDTR_IDTR_ACCESS:
7734 * VMX_EXIT_LDTR_TR_ACCESS:
7735 * VMX_EXIT_RDRAND:
7736 * VMX_EXIT_RSM:
7737 * VMX_EXIT_VMFUNC:
7738 * VMX_EXIT_ENCLS:
7739 * VMX_EXIT_RDSEED:
7740 * VMX_EXIT_XSAVES:
7741 * VMX_EXIT_XRSTORS:
7742 * VMX_EXIT_UMWAIT:
7743 * VMX_EXIT_TPAUSE:
7744 * VMX_EXIT_LOADIWKEY:
7745 * These VM-exits are -not- caused unconditionally by execution of the corresponding
7746     *    instruction. Any VM-exit for these instructions indicates a hardware problem,
7747 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
7748 *
7749 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
7750 */
7751 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7752 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
7753 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7754}
7755
7756
7757/**
7758 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
7759 */
7760HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7761{
7762 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7763
7764 /** @todo Optimize this: We currently drag in the whole MSR state
7765 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
7766     *    the MSRs required. That would require changes to IEM and possibly CPUM too.
7767     *    (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7768 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7769 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7770 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
7771 switch (idMsr)
7772 {
7773 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7774 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7775 }
7776
7777 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7778 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7779 AssertRCReturn(rc, rc);
7780
7781 Log4Func(("ecx=%#RX32\n", idMsr));
7782
7783#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7784 Assert(!pVmxTransient->fIsNestedGuest);
7785 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
7786 {
7787 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
7788 && idMsr != MSR_K6_EFER)
7789 {
7790 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
7791 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7792 }
7793 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7794 {
7795 Assert(pVmcsInfo->pvMsrBitmap);
7796 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7797 if (fMsrpm & VMXMSRPM_ALLOW_RD)
7798 {
7799 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
7800 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7801 }
7802 }
7803 }
7804#endif
7805
7806 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
7807 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
7808 if (rcStrict == VINF_SUCCESS)
7809 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7810 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7811 {
7812 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7813 rcStrict = VINF_SUCCESS;
7814 }
7815 else
7816 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
7817 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7818
7819 return rcStrict;
7820}
7821
7822
7823/**
7824 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
7825 */
7826HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7827{
7828 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7829
7830 /** @todo Optimize this: We currently drag in the whole MSR state
7831 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
7832     *    the MSRs required. That would require changes to IEM and possibly CPUM too.
7833     *    (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7834 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7835 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
7836
7837 /*
7838 * The FS and GS base MSRs are not part of the above all-MSRs mask.
7839     * Although we don't need to fetch the base (it will be overwritten shortly), loading
7840     * the guest state would also load the entire segment register, including its limit
7841     * and attributes, and thus we need to import them here.
7842 */
7843 switch (idMsr)
7844 {
7845 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7846 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7847 }
7848
7849 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7850 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7851 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7852 AssertRCReturn(rc, rc);
7853
7854 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
7855
7856 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
7857 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
7858
7859 if (rcStrict == VINF_SUCCESS)
7860 {
7861 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7862
7863 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
7864 if ( idMsr == MSR_IA32_APICBASE
7865 || ( idMsr >= MSR_IA32_X2APIC_START
7866 && idMsr <= MSR_IA32_X2APIC_END))
7867 {
7868 /*
7869 * We've already saved the APIC related guest-state (TPR) in post-run phase.
7870 * When full APIC register virtualization is implemented we'll have to make
7871 * sure APIC state is saved from the VMCS before IEM changes it.
7872 */
7873 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7874 }
7875 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
7876 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7877 else if (idMsr == MSR_K6_EFER)
7878 {
7879 /*
7880 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
7881 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
7882 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
7883 */
7884 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
7885 }
7886
7887 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
7888 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
7889 {
7890 switch (idMsr)
7891 {
7892 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
7893 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
7894 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
7895 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
7896 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
7897 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
7898 default:
7899 {
7900#ifndef IN_NEM_DARWIN
7901 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7902 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
7903 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7904 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
7905#else
7906 AssertMsgFailed(("TODO\n"));
7907#endif
7908 break;
7909 }
7910 }
7911 }
7912#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7913 else
7914 {
7915 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
7916 switch (idMsr)
7917 {
7918 case MSR_IA32_SYSENTER_CS:
7919 case MSR_IA32_SYSENTER_EIP:
7920 case MSR_IA32_SYSENTER_ESP:
7921 case MSR_K8_FS_BASE:
7922 case MSR_K8_GS_BASE:
7923 {
7924 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
7925 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7926 }
7927
7928            /* Writes to MSRs in the auto-load/store area or to swapped MSRs shouldn't cause VM-exits when MSR-bitmaps are used. */
7929 default:
7930 {
7931 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7932 {
7933 /* EFER MSR writes are always intercepted. */
7934 if (idMsr != MSR_K6_EFER)
7935 {
7936 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
7937 idMsr));
7938 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7939 }
7940 }
7941
7942 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7943 {
7944 Assert(pVmcsInfo->pvMsrBitmap);
7945 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7946 if (fMsrpm & VMXMSRPM_ALLOW_WR)
7947 {
7948 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
7949 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7950 }
7951 }
7952 break;
7953 }
7954 }
7955 }
7956#endif /* VBOX_STRICT */
7957 }
7958 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7959 {
7960 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7961 rcStrict = VINF_SUCCESS;
7962 }
7963 else
7964 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
7965 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7966
7967 return rcStrict;
7968}
7969
7970
7971/**
7972 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
7973 */
7974HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7975{
7976 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7977
7978 /** @todo The guest has likely hit a contended spinlock. We might want to
7979     *        poke or schedule a different guest VCPU. */
7980 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7981 if (RT_SUCCESS(rc))
7982 return VINF_EM_RAW_INTERRUPT;
7983
7984 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
7985 return rc;
7986}
7987
7988
7989/**
7990 * VM-exit handler for when the TPR value is lowered below the specified
7991 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
7992 */
7993HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7994{
7995 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7996 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
7997
7998 /*
7999 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8000 * We'll re-evaluate pending interrupts and inject them before the next VM
8001 * entry so we can just continue execution here.
8002 */
8003 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8004 return VINF_SUCCESS;
8005}
8006
8007
8008/**
8009 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8010 * VM-exit.
8011 *
8012 * @retval VINF_SUCCESS when guest execution can continue.
8013 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8014 * @retval VINF_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8015 * incompatible guest state for VMX execution (real-on-v86 case).
8016 */
8017HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8018{
8019 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8020 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8021
8022 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8023 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8024 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8025
8026 VBOXSTRICTRC rcStrict;
8027 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8028 uint64_t const uExitQual = pVmxTransient->uExitQual;
8029 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
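    /* For reference, the exit qualification for control-register accesses encodes the CR number
       in bits 3:0, the access type (MOV-to-CR, MOV-from-CR, CLTS, LMSW) in bits 5:4, the GPR
       operand in bits 11:8 and the LMSW source data in bits 31:16; see Intel spec. Table 27-3. */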
8030 switch (uAccessType)
8031 {
8032 /*
8033 * MOV to CRx.
8034 */
8035 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8036 {
8037 /*
8038 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8039 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
8040 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8041 * PAE PDPTEs as well.
8042 */
8043 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8044 AssertRCReturn(rc, rc);
8045
8046 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8047#ifndef IN_NEM_DARWIN
8048 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8049#endif
8050 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8051 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8052
8053 /*
8054             * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8055 * - When nested paging isn't used.
8056 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8057 * - We are executing in the VM debug loop.
8058 */
8059#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8060# ifndef IN_NEM_DARWIN
8061 Assert( iCrReg != 3
8062 || !VM_IS_VMX_NESTED_PAGING(pVM)
8063 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8064 || pVCpu->hmr0.s.fUsingDebugLoop);
8065# else
8066 Assert( iCrReg != 3
8067 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8068# endif
8069#endif
8070
8071 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8072 Assert( iCrReg != 8
8073 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8074
8075 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8076 AssertMsg( rcStrict == VINF_SUCCESS
8077 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8078
8079#ifndef IN_NEM_DARWIN
8080 /*
8081 * This is a kludge for handling switches back to real mode when we try to use
8082 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8083 * deal with special selector values, so we have to return to ring-3 and run
8084 * there till the selector values are V86 mode compatible.
8085 *
8086 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8087 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8088 * this function.
8089 */
8090 if ( iCrReg == 0
8091 && rcStrict == VINF_SUCCESS
8092 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8093 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8094 && (uOldCr0 & X86_CR0_PE)
8095 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8096 {
8097 /** @todo Check selectors rather than returning all the time. */
8098 Assert(!pVmxTransient->fIsNestedGuest);
8099 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8100 rcStrict = VINF_EM_RESCHEDULE_REM;
8101 }
8102#endif
8103
8104 break;
8105 }
8106
8107 /*
8108 * MOV from CRx.
8109 */
8110 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8111 {
8112 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8113 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8114
8115 /*
8116             * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8117 * - When nested paging isn't used.
8118 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8119 * - We are executing in the VM debug loop.
8120 */
8121#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8122# ifndef IN_NEM_DARWIN
8123 Assert( iCrReg != 3
8124 || !VM_IS_VMX_NESTED_PAGING(pVM)
8125 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8126 || pVCpu->hmr0.s.fLeaveDone);
8127# else
8128 Assert( iCrReg != 3
8129 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8130# endif
8131#endif
8132
8133 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8134 Assert( iCrReg != 8
8135 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8136
8137 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8138 break;
8139 }
8140
8141 /*
8142 * CLTS (Clear Task-Switch Flag in CR0).
8143 */
8144 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8145 {
8146 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8147 break;
8148 }
8149
8150 /*
8151 * LMSW (Load Machine-Status Word into CR0).
8152 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8153 */
8154 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8155 {
8156 RTGCPTR GCPtrEffDst;
8157 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8158 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8159 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8160 if (fMemOperand)
8161 {
8162 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
8163 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8164 }
8165 else
8166 GCPtrEffDst = NIL_RTGCPTR;
8167 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8168 break;
8169 }
8170
8171 default:
8172 {
8173 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8174 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8175 }
8176 }
8177
8178 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8179 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8180 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8181
8182 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8183 NOREF(pVM);
8184 return rcStrict;
8185}
8186
8187
8188/**
8189 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8190 * VM-exit.
8191 */
8192HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8193{
8194 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8195 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8196
8197 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8198 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8199 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8200 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8201 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK
8202 | CPUMCTX_EXTRN_EFER);
8203 /* EFER MSR also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8204 AssertRCReturn(rc, rc);
8205
8206    /* Refer to Intel spec. Table 27-5 "Exit Qualifications for I/O Instructions" for the format. */
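    /* For reference: bits 2:0 of the exit qualification give the access size minus one, bit 3 the
       direction (0 = OUT, 1 = IN), bit 4 whether this is a string instruction, bit 5 whether a REP
       prefix is present and bits 31:16 the port number. */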
8207 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8208 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8209 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8210 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8211 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8212 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8213 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8214
8215 /*
8216 * Update exit history to see if this exit can be optimized.
8217 */
8218 VBOXSTRICTRC rcStrict;
8219 PCEMEXITREC pExitRec = NULL;
8220 if ( !fGstStepping
8221 && !fDbgStepping)
8222 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8223 !fIOString
8224 ? !fIOWrite
8225 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8226 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8227 : !fIOWrite
8228 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8229 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8230 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8231 if (!pExitRec)
8232 {
8233 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8234 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
8235
8236 uint32_t const cbValue = s_aIOSizes[uIOSize];
8237 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8238 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8239 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8240 if (fIOString)
8241 {
8242 /*
8243 * INS/OUTS - I/O String instruction.
8244 *
8245 * Use instruction-information if available, otherwise fall back on
8246 * interpreting the instruction.
8247 */
8248 Log4Func(("cs:rip=%#04x:%#RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8249 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8250 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8251 if (fInsOutsInfo)
8252 {
8253 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8254 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8255 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8256 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8257 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8258 if (fIOWrite)
8259 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8260 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8261 else
8262 {
8263 /*
8264 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8265 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8266 * See Intel Instruction spec. for "INS".
8267 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8268 */
8269 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8270 }
8271 }
8272 else
8273 rcStrict = IEMExecOne(pVCpu);
8274
8275 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8276 fUpdateRipAlready = true;
8277 }
8278 else
8279 {
8280 /*
8281 * IN/OUT - I/O instruction.
8282 */
8283 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8284 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8285 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8286 if (fIOWrite)
8287 {
8288 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8289 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8290#ifndef IN_NEM_DARWIN
8291 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8292 && !pCtx->eflags.Bits.u1TF)
8293 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8294#endif
8295 }
8296 else
8297 {
8298 uint32_t u32Result = 0;
8299 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8300 if (IOM_SUCCESS(rcStrict))
8301 {
8302 /* Save result of I/O IN instr. in AL/AX/EAX. */
8303 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
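                    /* E.g. a 1-byte IN with eax=0x12345678 and an I/O result of 0xab yields
                       eax=0x123456ab: the upper bits are preserved and only AL is updated. */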
8304 }
8305#ifndef IN_NEM_DARWIN
8306 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8307 && !pCtx->eflags.Bits.u1TF)
8308 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8309#endif
8310 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8311 }
8312 }
8313
8314 if (IOM_SUCCESS(rcStrict))
8315 {
8316 if (!fUpdateRipAlready)
8317 {
8318 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8319 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8320 }
8321
8322 /*
8323             * INS/OUTS with a REP prefix updates RFLAGS; missing this can be observed as a
8324             * triple-fault guru meditation while booting a Fedora 17 64-bit guest.
8325 *
8326 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8327 */
8328 if (fIOString)
8329 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8330
8331 /*
8332 * If any I/O breakpoints are armed, we need to check if one triggered
8333 * and take appropriate action.
8334 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8335 */
8336 rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_DR7);
8337 AssertRCReturn(rc, rc);
8338
8339 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8340 * execution engines about whether hyper BPs and such are pending. */
8341 uint32_t const uDr7 = pCtx->dr[7];
8342 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8343 && X86_DR7_ANY_RW_IO(uDr7)
8344 && (pCtx->cr4 & X86_CR4_DE))
8345 || DBGFBpIsHwIoArmed(pVM)))
8346 {
8347 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8348
8349#ifndef IN_NEM_DARWIN
8350 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8351 VMMRZCallRing3Disable(pVCpu);
8352 HM_DISABLE_PREEMPT(pVCpu);
8353
8354 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8355
8356 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8357 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8358 {
8359 /* Raise #DB. */
8360 if (fIsGuestDbgActive)
8361 ASMSetDR6(pCtx->dr[6]);
8362 if (pCtx->dr[7] != uDr7)
8363 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8364
8365 vmxHCSetPendingXcptDB(pVCpu);
8366 }
8367 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8368 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8369 else if ( rcStrict2 != VINF_SUCCESS
8370 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8371 rcStrict = rcStrict2;
8372 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8373
8374 HM_RESTORE_PREEMPT();
8375 VMMRZCallRing3Enable(pVCpu);
8376#else
8377 /** @todo */
8378#endif
8379 }
8380 }
8381
8382#ifdef VBOX_STRICT
8383 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8384 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8385 Assert(!fIOWrite);
8386 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8387 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8388 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8389 Assert(fIOWrite);
8390 else
8391 {
8392# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8393 * statuses, that the VMM device and some others may return. See
8394 * IOM_SUCCESS() for guidance. */
8395 AssertMsg( RT_FAILURE(rcStrict)
8396 || rcStrict == VINF_SUCCESS
8397 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8398 || rcStrict == VINF_EM_DBG_BREAKPOINT
8399 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8400 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8401# endif
8402 }
8403#endif
8404 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8405 }
8406 else
8407 {
8408 /*
8409 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8410 */
8411 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8412 AssertRCReturn(rc2, rc2);
8413 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8414 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8415 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8416 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8417 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8418 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8419
8420 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8421 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8422
8423 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8424 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8425 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8426 }
8427 return rcStrict;
8428}
8429
8430
8431/**
8432 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8433 * VM-exit.
8434 */
8435HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8436{
8437 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8438
8439    /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8440 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8441 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
8442 {
8443 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
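        /* The IDT-vectoring information field uses the same layout as the VM-exit interruption
           information read elsewhere (vector, type, error-code valid, valid bit). */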
8444 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
8445 {
8446 uint32_t uErrCode;
8447 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
8448 {
8449 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8450 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
8451 }
8452 else
8453 uErrCode = 0;
8454
8455 RTGCUINTPTR GCPtrFaultAddress;
8456 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
8457 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
8458 else
8459 GCPtrFaultAddress = 0;
8460
8461 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8462
8463 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
8464 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
8465
8466 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
8467 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
8468 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8469 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8470 }
8471 }
8472
8473 /* Fall back to the interpreter to emulate the task-switch. */
8474 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8475 return VERR_EM_INTERPRETER;
8476}
8477
8478
8479/**
8480 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
8481 */
8482HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8483{
8484 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8485
8486 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8487 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
8488 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8489 AssertRC(rc);
8490 return VINF_EM_DBG_STEPPED;
8491}
8492
8493
8494/**
8495 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
8496 */
8497HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8498{
8499 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8500 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
8501
8502 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8503 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8504 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8505 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8506 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8507
8508 /*
8509 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8510 */
8511 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8512 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8513 {
8514 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
8515 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
8516 {
8517 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8518 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8519 }
8520 }
8521 else
8522 {
8523 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8524 return rcStrict;
8525 }
8526
8527    /* IOMR0MmioPhysHandler() below may call into IEM, so save the necessary state. */
8528 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8529 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8530 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8531 AssertRCReturn(rc, rc);
8532
8533    /* See Intel spec. Table 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses". */
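    /* For reference: bits 11:0 of the exit qualification give the offset into the APIC-access page
       and bits 15:12 the access type (linear read/write/fetch, accesses during event delivery and
       guest-physical accesses). */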
8534 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
8535 switch (uAccessType)
8536 {
8537#ifndef IN_NEM_DARWIN
8538 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
8539 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
8540 {
8541 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
8542 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
8543 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
8544
8545 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
8546 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
8547 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
8548 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
8549 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
8550
8551 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
8552 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
8553 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8554 if ( rcStrict == VINF_SUCCESS
8555 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8556 || rcStrict == VERR_PAGE_NOT_PRESENT)
8557 {
8558 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8559 | HM_CHANGED_GUEST_APIC_TPR);
8560 rcStrict = VINF_SUCCESS;
8561 }
8562 break;
8563 }
8564#else
8565 /** @todo */
8566#endif
8567
8568 default:
8569 {
8570 Log4Func(("uAccessType=%#x\n", uAccessType));
8571 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
8572 break;
8573 }
8574 }
8575
8576 if (rcStrict != VINF_SUCCESS)
8577 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
8578 return rcStrict;
8579}
8580
8581
8582/**
8583 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
8584 * VM-exit.
8585 */
8586HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8587{
8588 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8589 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8590
8591 /*
8592 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
8593 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
8594 * must emulate the MOV DRx access.
8595 */
8596 if (!pVmxTransient->fIsNestedGuest)
8597 {
8598 /* We should -not- get this VM-exit if the guest's debug registers were active. */
8599 if (pVmxTransient->fWasGuestDebugStateActive)
8600 {
8601 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
8602 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8603 }
8604
8605 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
8606 && !pVmxTransient->fWasHyperDebugStateActive)
8607 {
8608 Assert(!DBGFIsStepping(pVCpu));
8609 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
8610
8611 /* Don't intercept MOV DRx any more. */
8612 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
8613 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8614 AssertRC(rc);
8615
8616#ifndef IN_NEM_DARWIN
8617 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
8618 VMMRZCallRing3Disable(pVCpu);
8619 HM_DISABLE_PREEMPT(pVCpu);
8620
8621 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
8622 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
8623 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8624
8625 HM_RESTORE_PREEMPT();
8626 VMMRZCallRing3Enable(pVCpu);
8627#else
8628 CPUMR3NemActivateGuestDebugState(pVCpu);
8629 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8630 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
8631#endif
8632
8633#ifdef VBOX_WITH_STATISTICS
8634 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8635 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8636 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8637 else
8638 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8639#endif
8640 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
8641 return VINF_SUCCESS;
8642 }
8643 }
8644
8645 /*
8646 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires the EFER MSR and CS.
8647 * The EFER MSR is always up-to-date.
8648 * Update the segment registers and DR7 from the CPU.
8649 */
8650 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8651 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8652 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
8653 AssertRCReturn(rc, rc);
8654 Log4Func(("cs:rip=%#04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
8655
8656 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8657 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8658 {
8659 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8660 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
8661 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
8662 if (RT_SUCCESS(rc))
8663 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
8664 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8665 }
8666 else
8667 {
8668 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8669 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
8670 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
8671 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8672 }
8673
8674 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8675 if (RT_SUCCESS(rc))
8676 {
8677 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8678 AssertRCReturn(rc2, rc2);
8679 return VINF_SUCCESS;
8680 }
8681 return rc;
8682}
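/*
 * Note on the MOV DRx handler above: there are two paths. The fast path (guest not
 * being single-stepped and the hypervisor debug state not active) stops intercepting
 * MOV DRx, loads the guest debug state onto the CPU and simply restarts the
 * instruction. The slow path imports the segment registers and DR7 and interprets
 * the single MOV DRx access via EMInterpretDRxWrite/EMInterpretDRxRead before
 * advancing RIP.
 */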
8683
8684
8685/**
8686 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
8687 * Conditional VM-exit.
8688 */
8689HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8690{
8691 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8692
8693#ifndef IN_NEM_DARWIN
8694 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8695
8696 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8697 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8698 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8699 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8700 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8701
8702 /*
8703 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8704 */
8705 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8706 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8707 {
8708 /*
8709 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
8710 * instruction emulation to inject the original event. Otherwise, injecting the original event
8711 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
8712 */
8713 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8714 { /* likely */ }
8715 else
8716 {
8717 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8718#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8719 /** @todo NSTVMX: Think about how this should be handled. */
8720 if (pVmxTransient->fIsNestedGuest)
8721 return VERR_VMX_IPE_3;
8722#endif
8723 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8724 }
8725 }
8726 else
8727 {
8728 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8729 return rcStrict;
8730 }
8731
8732 /*
8733 * Get sufficient state and update the exit history entry.
8734 */
8735 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8736 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8737 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8738 AssertRCReturn(rc, rc);
8739
8740 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8741 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8742 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
8743 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8744 if (!pExitRec)
8745 {
8746 /*
8747 * If we succeed, resume guest execution.
8748 * If we fail in interpreting the instruction because we couldn't get the guest physical address
8749 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
8750 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
8751 * weird case. See @bugref{6043}.
8752 */
8753 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8754 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8755/** @todo bird: We can probably just go straight to IOM here and assume that
8756 * it's MMIO, then fall back on PGM if that hunch didn't work out so
8757 * well. However, we need to address that aliasing workarounds that
8758 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
8759 *
8760 * Might also be interesting to see if we can get this done more or
8761 * less locklessly inside IOM. Need to consider the lookup table
8762 * updating and use a bit more carefully first (or do all updates via
8763 * rendezvous) */
8764 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
8765 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
8766 if ( rcStrict == VINF_SUCCESS
8767 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8768 || rcStrict == VERR_PAGE_NOT_PRESENT)
8769 {
8770 /* Successfully handled MMIO operation. */
8771 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8772 | HM_CHANGED_GUEST_APIC_TPR);
8773 rcStrict = VINF_SUCCESS;
8774 }
8775 }
8776 else
8777 {
8778 /*
8779 * Frequent exit or something needing probing. Call EMHistoryExec.
8780 */
8781 Log4(("EptMisconfigExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
8782 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
8783
8784 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8785 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8786
8787 Log4(("EptMisconfigExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8788 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8789 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8790 }
8791 return rcStrict;
8792#else
8793 AssertFailed();
8794 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
8795#endif
8796}
8797
8798
8799/**
8800 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
8801 * VM-exit.
8802 */
8803HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8804{
8805 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8806#ifndef IN_NEM_DARWIN
8807 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8808
8809 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8810 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8811 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8812 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8813 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8814 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8815
8816 /*
8817 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8818 */
8819 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8820 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8821 {
8822 /*
8823 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
8824 * we shall resolve the nested #PF and re-inject the original event.
8825 */
8826 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8827 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
8828 }
8829 else
8830 {
8831 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8832 return rcStrict;
8833 }
8834
8835 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8836 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8837 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8838 AssertRCReturn(rc, rc);
8839
8840 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8841 uint64_t const uExitQual = pVmxTransient->uExitQual;
8842 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
8843
8844 RTGCUINT uErrorCode = 0;
8845 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
8846 uErrorCode |= X86_TRAP_PF_ID;
8847 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8848 uErrorCode |= X86_TRAP_PF_RW;
8849 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
8850 uErrorCode |= X86_TRAP_PF_P;
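 /* Note: the EPT-violation exit qualification bits are translated above into x86
    #PF error-code bits (instruction fetch -> ID, write access -> RW, any EPT
    read/write/execute permission bit present -> P) so that the nested-paging
    page-fault handler below sees a conventional page-fault error code. */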
8851
8852 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8853 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%#RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
8854
8855 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8856
8857 /*
8858 * Handle the page-fault trap for the nested shadow table.
8859 */
8860 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
8861 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
8862 TRPMResetTrap(pVCpu);
8863
8864 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
8865 if ( rcStrict == VINF_SUCCESS
8866 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8867 || rcStrict == VERR_PAGE_NOT_PRESENT)
8868 {
8869 /* Successfully synced our nested page tables. */
8870 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
8871 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
8872 return VINF_SUCCESS;
8873 }
8874#else
8875 PVM pVM = pVCpu->CTX_SUFF(pVM);
8876 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
8877 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8878 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8879 vmxHCImportGuestRip(pVCpu);
8880 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
8881
8882 /*
8883 * Ask PGM for information about the given GCPhys. We need to check if we're
8884 * out of sync first.
8885 */
8886 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE), false, false };
8887 PGMPHYSNEMPAGEINFO Info;
8888 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
8889 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
8890 if (RT_SUCCESS(rc))
8891 {
8892 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8893 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
8894 {
8895 if (State.fCanResume)
8896 {
8897 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
8898 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8899 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8900 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8901 State.fDidSomething ? "" : " no-change"));
8902 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
8903 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8904 return VINF_SUCCESS;
8905 }
8906 }
8907
8908 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
8909 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8910 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8911 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8912 State.fDidSomething ? "" : " no-change"));
8913 }
8914 else
8915 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
8916 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8917 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
8918
8919 /*
8920 * Emulate the memory access, either access handler or special memory.
8921 */
8922 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
8923 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8924 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
8925 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
8926 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8927
8928 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8929 AssertRCReturn(rc, rc);
8930
8931 VBOXSTRICTRC rcStrict;
8932 if (!pExitRec)
8933 rcStrict = IEMExecOne(pVCpu);
8934 else
8935 {
8936 /* Frequent access or probing. */
8937 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8938 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8939 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8940 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8941 }
8942
8943 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8944#endif
8945
8946 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8947 return rcStrict;
8948}
8949
8950
8951#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8952/**
8953 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
8954 */
8955HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8956{
8957 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8958
8959 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8960 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8961 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8962 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8963 | CPUMCTX_EXTRN_HWVIRT
8964 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8965 AssertRCReturn(rc, rc);
8966
8967 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8968
8969 VMXVEXITINFO ExitInfo;
8970 RT_ZERO(ExitInfo);
8971 ExitInfo.uReason = pVmxTransient->uExitReason;
8972 ExitInfo.u64Qual = pVmxTransient->uExitQual;
8973 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
8974 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
8975 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
8976
8977 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
8978 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8979 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
8980 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8981 {
8982 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8983 rcStrict = VINF_SUCCESS;
8984 }
8985 return rcStrict;
8986}
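/*
 * Note: the VMX-instruction exit handlers that follow share the shape of VMCLEAR
 * above: read the instruction length/info and exit qualification from the VMCS,
 * import just enough guest state for IEM to decode the instruction, fill in a
 * VMXVEXITINFO, hand it to the corresponding IEMExecDecodedXxx routine and fold
 * VINF_IEM_RAISED_XCPT back into VINF_SUCCESS after marking the raised-exception
 * context bits as changed. VMLAUNCH/VMRESUME import the entire VMCS state instead,
 * and VMXOFF has no memory operand to decode.
 */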
8987
8988
8989/**
8990 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
8991 */
8992HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8993{
8994 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8995
8996 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH;
8997 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
8998 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8999 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9000 AssertRCReturn(rc, rc);
9001
9002 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9003
9004 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9005 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9006 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9007 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9008 {
9009 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9010 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9011 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9012 }
9013 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9014 return rcStrict;
9015}
9016
9017
9018/**
9019 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9020 */
9021HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9022{
9023 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9024
9025 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9026 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9027 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9028 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9029 | CPUMCTX_EXTRN_HWVIRT
9030 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9031 AssertRCReturn(rc, rc);
9032
9033 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9034
9035 VMXVEXITINFO ExitInfo;
9036 RT_ZERO(ExitInfo);
9037 ExitInfo.uReason = pVmxTransient->uExitReason;
9038 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9039 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9040 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9041 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9042
9043 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9044 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9045 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9046 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9047 {
9048 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9049 rcStrict = VINF_SUCCESS;
9050 }
9051 return rcStrict;
9052}
9053
9054
9055/**
9056 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9057 */
9058HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9059{
9060 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9061
9062 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9063 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9064 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9065 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9066 | CPUMCTX_EXTRN_HWVIRT
9067 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9068 AssertRCReturn(rc, rc);
9069
9070 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9071
9072 VMXVEXITINFO ExitInfo;
9073 RT_ZERO(ExitInfo);
9074 ExitInfo.uReason = pVmxTransient->uExitReason;
9075 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9076 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9077 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9078 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9079
9080 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9081 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9082 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9083 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9084 {
9085 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9086 rcStrict = VINF_SUCCESS;
9087 }
9088 return rcStrict;
9089}
9090
9091
9092/**
9093 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9094 */
9095HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9096{
9097 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9098
9099 /*
9100 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9101 * thus might not need to import the shadow VMCS state. However, it is safer to
9102 * import it just in case code elsewhere dares look at unsynced VMCS fields.
9103 */
9104 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9105 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9106 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9107 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9108 | CPUMCTX_EXTRN_HWVIRT
9109 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9110 AssertRCReturn(rc, rc);
9111
9112 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9113
9114 VMXVEXITINFO ExitInfo;
9115 RT_ZERO(ExitInfo);
9116 ExitInfo.uReason = pVmxTransient->uExitReason;
9117 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9118 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9119 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9120 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9121 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9122
9123 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9124 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9125 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9126 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9127 {
9128 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9129 rcStrict = VINF_SUCCESS;
9130 }
9131 return rcStrict;
9132}
9133
9134
9135/**
9136 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9137 */
9138HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9139{
9140 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9141
9142 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME;
9143 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9144 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9145 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9146 AssertRCReturn(rc, rc);
9147
9148 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9149
9150 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9151 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9152 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9153 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9154 {
9155 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9156 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9157 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9158 }
9159 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9160 return rcStrict;
9161}
9162
9163
9164/**
9165 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9166 */
9167HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9168{
9169 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9170
9171 /*
9172 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, our HM hook
9173 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and
9174 * flags re-loading the entire shadow VMCS, so we should save the entire shadow VMCS here.
9175 */
9176 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9177 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9178 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9179 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9180 | CPUMCTX_EXTRN_HWVIRT
9181 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9182 AssertRCReturn(rc, rc);
9183
9184 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9185
9186 VMXVEXITINFO ExitInfo;
9187 RT_ZERO(ExitInfo);
9188 ExitInfo.uReason = pVmxTransient->uExitReason;
9189 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9190 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9191 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9192 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9193 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9194
9195 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9196 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9197 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9198 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9199 {
9200 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9201 rcStrict = VINF_SUCCESS;
9202 }
9203 return rcStrict;
9204}
9205
9206
9207/**
9208 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9209 */
9210HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9211{
9212 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9213
9214 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9215 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR4
9216 | CPUMCTX_EXTRN_HWVIRT
9217 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
9218 AssertRCReturn(rc, rc);
9219
9220 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9221
9222 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9223 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9224 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9225 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9226 {
9227 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9228 rcStrict = VINF_SUCCESS;
9229 }
9230 return rcStrict;
9231}
9232
9233
9234/**
9235 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9236 */
9237HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9238{
9239 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9240
9241 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9242 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9243 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9244 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9245 | CPUMCTX_EXTRN_HWVIRT
9246 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9247 AssertRCReturn(rc, rc);
9248
9249 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9250
9251 VMXVEXITINFO ExitInfo;
9252 RT_ZERO(ExitInfo);
9253 ExitInfo.uReason = pVmxTransient->uExitReason;
9254 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9255 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9256 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9257 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9258
9259 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9260 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9261 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9262 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9263 {
9264 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9265 rcStrict = VINF_SUCCESS;
9266 }
9267 return rcStrict;
9268}
9269
9270
9271/**
9272 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9273 */
9274HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9275{
9276 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9277
9278 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9279 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9280 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9281 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9282 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9283 AssertRCReturn(rc, rc);
9284
9285 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9286
9287 VMXVEXITINFO ExitInfo;
9288 RT_ZERO(ExitInfo);
9289 ExitInfo.uReason = pVmxTransient->uExitReason;
9290 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9291 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9292 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9293 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9294
9295 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9296 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9297 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9298 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9299 {
9300 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9301 rcStrict = VINF_SUCCESS;
9302 }
9303 return rcStrict;
9304}
9305
9306
9307# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9308/**
9309 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9310 */
9311HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9312{
9313 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9314
9315 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9316 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9317 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9318 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9319 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9320 AssertRCReturn(rc, rc);
9321
9322 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9323
9324 VMXVEXITINFO ExitInfo;
9325 RT_ZERO(ExitInfo);
9326 ExitInfo.uReason = pVmxTransient->uExitReason;
9327 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9328 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9329 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9330 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9331
9332 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9333 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9334 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9335 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9336 {
9337 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9338 rcStrict = VINF_SUCCESS;
9339 }
9340 return rcStrict;
9341}
9342# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9343#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9344/** @} */
9345
9346
9347#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9348/** @name Nested-guest VM-exit handlers.
9349 * @{
9350 */
9351/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9352/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9353/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
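/*
 * Note: the nested-guest handlers below all follow the same decision pattern: query
 * whether the outer (nested-guest) hypervisor has the relevant intercept enabled via
 * CPUMIsGuestVmx*Set() and, if so, reflect the VM-exit to it through the
 * IEMExecVmxVmexit* routines; otherwise the exit is handled as an ordinary guest
 * exit by calling the regular vmxHCExitXxx handler.
 */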
9354
9355/**
9356 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9357 * Conditional VM-exit.
9358 */
9359HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9360{
9361 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9362
9363 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
9364
9365 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9366 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9367 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9368
9369 switch (uExitIntType)
9370 {
9371#ifndef IN_NEM_DARWIN
9372 /*
9373 * Physical NMIs:
9374 * We shouldn't direct host physical NMIs to the nested-guest. Dispatch it to the host.
9375 */
9376 case VMX_EXIT_INT_INFO_TYPE_NMI:
9377 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9378#endif
9379
9380 /*
9381 * Hardware exceptions,
9382 * Software exceptions,
9383 * Privileged software exceptions:
9384 * Figure out if the exception must be delivered to the guest or the nested-guest.
9385 */
9386 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9387 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9388 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9389 {
9390 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
9391 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9392 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9393 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9394
9395 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9396 bool const fIntercept = CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo),
9397 pVmxTransient->uExitIntErrorCode);
9398 if (fIntercept)
9399 {
9400 /* Exit qualification is required for debug and page-fault exceptions. */
9401 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9402
9403 /*
9404 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9405 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9406 * length. However, if delivery of a software interrupt, software exception or privileged
9407 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
9408 */
9409 VMXVEXITINFO ExitInfo;
9410 RT_ZERO(ExitInfo);
9411 ExitInfo.uReason = pVmxTransient->uExitReason;
9412 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9413 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9414
9415 VMXVEXITEVENTINFO ExitEventInfo;
9416 RT_ZERO(ExitEventInfo);
9417 ExitEventInfo.uExitIntInfo = pVmxTransient->uExitIntInfo;
9418 ExitEventInfo.uExitIntErrCode = pVmxTransient->uExitIntErrorCode;
9419 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9420 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9421
9422#ifdef DEBUG_ramshankar
9423 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9424 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n", pVmxTransient->uExitIntInfo,
9425 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
9426 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9427 {
9428 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n", pVmxTransient->uIdtVectoringInfo,
9429 pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
9430 }
9431#endif
9432 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
9433 }
9434
9435 /* Nested paging is currently a requirement; otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
9436 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9437 return vmxHCExitXcpt(pVCpu, pVmxTransient);
9438 }
9439
9440 /*
9441 * Software interrupts:
9442 * VM-exits cannot be caused by software interrupts.
9443 *
9444 * External interrupts:
9445 * This should only happen when "acknowledge external interrupts on VM-exit"
9446 * control is set. However, we never set this when executing a guest or
9447 * nested-guest. For nested-guests it is emulated while injecting interrupts into
9448 * the guest.
9449 */
9450 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
9451 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
9452 default:
9453 {
9454 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
9455 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
9456 }
9457 }
9458}
9459
9460
9461/**
9462 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
9463 * Unconditional VM-exit.
9464 */
9465HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9466{
9467 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9468 return IEMExecVmxVmexitTripleFault(pVCpu);
9469}
9470
9471
9472/**
9473 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
9474 */
9475HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9476{
9477 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9478
9479 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
9480 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9481 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9482}
9483
9484
9485/**
9486 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
9487 */
9488HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9489{
9490 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9491
9492 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
9493 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9494 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9495}
9496
9497
9498/**
9499 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
9500 * Unconditional VM-exit.
9501 */
9502HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9503{
9504 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9505
9506 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9507 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9508 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9509 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9510
9511 VMXVEXITINFO ExitInfo;
9512 RT_ZERO(ExitInfo);
9513 ExitInfo.uReason = pVmxTransient->uExitReason;
9514 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9515 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9516
9517 VMXVEXITEVENTINFO ExitEventInfo;
9518 RT_ZERO(ExitEventInfo);
9519 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9520 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9521 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
9522}
9523
9524
9525/**
9526 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
9527 */
9528HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9529{
9530 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9531
9532 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
9533 {
9534 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9535 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9536 }
9537 return vmxHCExitHlt(pVCpu, pVmxTransient);
9538}
9539
9540
9541/**
9542 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
9543 */
9544HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9545{
9546 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9547
9548 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
9549 {
9550 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9551 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9552
9553 VMXVEXITINFO ExitInfo;
9554 RT_ZERO(ExitInfo);
9555 ExitInfo.uReason = pVmxTransient->uExitReason;
9556 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9557 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9558 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9559 }
9560 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
9561}
9562
9563
9564/**
9565 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
9566 */
9567HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9568{
9569 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9570
9571 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
9572 {
9573 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9574 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9575 }
9576 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
9577}
9578
9579
9580/**
9581 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
9582 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
9583 */
9584HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9585{
9586 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9587
9588 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
9589 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
9590
9591 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9592
9593 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
9594 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9595 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9596
9597 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
9598 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
9599 u64VmcsField &= UINT64_C(0xffffffff);
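 /* Note: outside long mode only the lower 32 bits of the register operand are used
    as the VMCS field encoding, hence the masking above; CPUMIsGuestInLongModeEx()
    needs EFER, which is what the HMVMX_CPUMCTX_ASSERT guards. */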
9600
9601 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
9602 {
9603 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9604 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9605
9606 VMXVEXITINFO ExitInfo;
9607 RT_ZERO(ExitInfo);
9608 ExitInfo.uReason = pVmxTransient->uExitReason;
9609 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9610 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9611 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
9612 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9613 }
9614
9615 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
9616 return vmxHCExitVmread(pVCpu, pVmxTransient);
9617 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
9618}
9619
9620
9621/**
9622 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
9623 */
9624HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9625{
9626 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9627
9628 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
9629 {
9630 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9631 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9632 }
9633
9634 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
9635}
9636
9637
9638/**
9639 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
9640 * Conditional VM-exit.
9641 */
9642HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9643{
9644 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9645
9646 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9647 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9648
9649 VBOXSTRICTRC rcStrict;
9650 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
9651 switch (uAccessType)
9652 {
9653 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
9654 {
9655 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9656 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9657 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9658 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9659
9660 bool fIntercept;
9661 switch (iCrReg)
9662 {
9663 case 0:
9664 case 4:
9665 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
9666 break;
9667
9668 case 3:
9669 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
9670 break;
9671
9672 case 8:
9673 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
9674 break;
9675
9676 default:
9677 fIntercept = false;
9678 break;
9679 }
9680 if (fIntercept)
9681 {
9682 VMXVEXITINFO ExitInfo;
9683 RT_ZERO(ExitInfo);
9684 ExitInfo.uReason = pVmxTransient->uExitReason;
9685 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9686 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9687 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9688 }
9689 else
9690 {
9691 int const rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
9692 AssertRCReturn(rc, rc);
9693 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9694 }
9695 break;
9696 }
9697
9698 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
9699 {
9700 /*
9701 * CR0/CR4 reads do not cause VM-exits; the read-shadow is used (subject to masking).
9702 * CR2 reads do not cause a VM-exit.
9703 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
9704 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
9705 */
9706 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9707 if ( iCrReg == 3
9708 || iCrReg == 8)
9709 {
9710 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
9711 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
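 /* Note: the table above maps the control-register number to the processor-based
    control that intercepts reads from it; only the CR3 and CR8 slots are non-zero
    since only "CR3 store exiting" and "CR8 store exiting" exist for reads. */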
9712 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
9713 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
9714 {
9715 VMXVEXITINFO ExitInfo;
9716 RT_ZERO(ExitInfo);
9717 ExitInfo.uReason = pVmxTransient->uExitReason;
9718 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9719 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9720 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9721 }
9722 else
9723 {
9724 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9725 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9726 }
9727 }
9728 else
9729 {
9730 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
9731 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
9732 }
9733 break;
9734 }
9735
9736 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
9737 {
9738 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
9739 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
9740 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
9741 if ( (uGstHostMask & X86_CR0_TS)
9742 && (uReadShadow & X86_CR0_TS))
9743 {
9744 VMXVEXITINFO ExitInfo;
9745 RT_ZERO(ExitInfo);
9746 ExitInfo.uReason = pVmxTransient->uExitReason;
9747 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9748 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9749 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9750 }
9751 else
9752 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
9753 break;
9754 }
9755
9756 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
9757 {
9758 RTGCPTR GCPtrEffDst;
9759 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
9760 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
9761 if (fMemOperand)
9762 {
9763 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9764 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
9765 }
9766 else
9767 GCPtrEffDst = NIL_RTGCPTR;
9768
9769 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
9770 {
9771 VMXVEXITINFO ExitInfo;
9772 RT_ZERO(ExitInfo);
9773 ExitInfo.uReason = pVmxTransient->uExitReason;
9774 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9775 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
9776 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9777 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9778 }
9779 else
9780 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
9781 break;
9782 }
9783
9784 default:
9785 {
9786 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
9787 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
9788 }
9789 }
9790
9791 if (rcStrict == VINF_IEM_RAISED_XCPT)
9792 {
9793 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9794 rcStrict = VINF_SUCCESS;
9795 }
9796 return rcStrict;
9797}
9798
9799
9800/**
9801 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
9802 * Conditional VM-exit.
9803 */
9804HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9805{
9806 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9807
9808 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
9809 {
9810 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9811 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9812
9813 VMXVEXITINFO ExitInfo;
9814 RT_ZERO(ExitInfo);
9815 ExitInfo.uReason = pVmxTransient->uExitReason;
9816 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9817 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9818 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9819 }
9820 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
9821}
9822
9823
9824/**
9825 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
9826 * Conditional VM-exit.
9827 */
9828HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9829{
9830 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9831
9832 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9833
9834 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
9835 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
9836 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
9837
9838 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
9839 uint8_t const cbAccess = s_aIOSizes[uIOSize];
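 /* Note: the I/O-size field in the exit qualification encodes 0=1 byte, 1=2 bytes and
    3=4 bytes; 2 is not a valid encoding, which is why the lookup table has a zero in
    that slot and the AssertReturn above rejects it. */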
9840 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
9841 {
9842 /*
9843 * IN/OUT instruction:
9844 * - Provides VM-exit instruction length.
9845 *
9846 * INS/OUTS instruction:
9847 * - Provides VM-exit instruction length.
9848 * - Provides Guest-linear address.
9849 * - Optionally provides VM-exit instruction info (depends on CPU feature).
9850 */
9851 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9852 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9853
9854 /* Make sure we don't use stale/uninitialized VMX-transient info below. */
9855 pVmxTransient->ExitInstrInfo.u = 0;
9856 pVmxTransient->uGuestLinearAddr = 0;
9857
9858 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
9859 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
9860 if (fIOString)
9861 {
9862 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9863 if (fVmxInsOutsInfo)
9864 {
9865 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
9866 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9867 }
9868 }
9869
9870 VMXVEXITINFO ExitInfo;
9871 RT_ZERO(ExitInfo);
9872 ExitInfo.uReason = pVmxTransient->uExitReason;
9873 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9874 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9875 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
9876 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
9877 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9878 }
9879 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
9880}
9881
9882
9883/**
9884 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
9885 */
9886HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9887{
9888 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9889
9890 uint32_t fMsrpm;
9891 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9892 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9893 else
9894 fMsrpm = VMXMSRPM_EXIT_RD;
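 /* Note: when the nested hypervisor does not use MSR bitmaps, every RDMSR in the
    nested-guest must cause a VM-exit to it, which is what forcing VMXMSRPM_EXIT_RD
    above achieves; with bitmaps the per-MSR permission decides. */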
9895
9896 if (fMsrpm & VMXMSRPM_EXIT_RD)
9897 {
9898 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9899 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9900 }
9901 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
9902}
9903
9904
9905/**
9906 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
9907 */
9908HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9909{
9910 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9911
9912 uint32_t fMsrpm;
9913 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9914 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9915 else
9916 fMsrpm = VMXMSRPM_EXIT_WR;
9917
9918 if (fMsrpm & VMXMSRPM_EXIT_WR)
9919 {
9920 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9921 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9922 }
9923 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
9924}
9925
9926
9927/**
9928 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
9929 */
9930HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9931{
9932 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9933
9934 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
9935 {
9936 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9937 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9938 }
9939 return vmxHCExitMwait(pVCpu, pVmxTransient);
9940}
9941
9942
9943/**
9944 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
9945 * VM-exit.
9946 */
9947HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9948{
9949 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9950
9951 /** @todo NSTVMX: Should consider debugging nested-guests using the VM debugger. */
9952 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
9953 VMXVEXITINFO ExitInfo;
9954 RT_ZERO(ExitInfo);
9955 ExitInfo.uReason = pVmxTransient->uExitReason;
9956 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
9957 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
9958}
9959
9960
9961/**
9962 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
9963 */
9964HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9965{
9966 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9967
9968 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
9969 {
9970 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9971 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9972 }
9973 return vmxHCExitMonitor(pVCpu, pVmxTransient);
9974}
9975
9976
9977/**
9978 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
9979 */
9980HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9981{
9982 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9983
9984 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
9985 * PAUSE when executing a nested-guest? If it does not, we would not need
9986 * to check for the intercepts here. Just call VM-exit... */
9987
9988 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
9989 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
9990 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
9991 {
9992 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9993 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9994 }
9995 return vmxHCExitPause(pVCpu, pVmxTransient);
9996}
9997
9998
9999/**
10000 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10001 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10002 */
10003HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10004{
10005 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10006
10007 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10008 {
10009 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
10010 VMXVEXITINFO ExitInfo;
10011 RT_ZERO(ExitInfo);
10012 ExitInfo.uReason = pVmxTransient->uExitReason;
10013 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
10014 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10015 }
10016 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10017}
10018
10019
10020/**
10021 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10022 * VM-exit.
10023 */
10024HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10025{
10026 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10027
10028 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10029 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10030 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10031 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10032
10033 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10034
10035 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10036 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10037
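    /* Hand the nested hypervisor both the exit/qualification info and any IDT-vectoring
       event that was being delivered when the APIC access occurred. */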
10038 VMXVEXITINFO ExitInfo;
10039 RT_ZERO(ExitInfo);
10040 ExitInfo.uReason = pVmxTransient->uExitReason;
10041 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10042 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10043
10044 VMXVEXITEVENTINFO ExitEventInfo;
10045 RT_ZERO(ExitEventInfo);
10046 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10047 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10048 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10049}
10050
10051
10052/**
10053 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10054 * Conditional VM-exit.
10055 */
10056HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10057{
10058 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10059
10060 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10061 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10062 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10063}
10064
10065
10066/**
10067 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10068 * Conditional VM-exit.
10069 */
10070HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10071{
10072 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10073
10074 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10075 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10076 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10077}
10078
10079
10080/**
10081 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10082 */
10083HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10084{
10085 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10086
10087 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10088 {
10089 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10090 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10091 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10092 }
10093 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10094}
10095
10096
10097/**
10098 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10099 */
10100HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10101{
10102 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10103
10104 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10105 {
10106 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10107 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10108 }
10109 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10110}
10111
10112
10113/**
10114 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10115 */
10116HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10117{
10118 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10119
10120 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10121 {
10122 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10123 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10124 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10125 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10126
10127 VMXVEXITINFO ExitInfo;
10128 RT_ZERO(ExitInfo);
10129 ExitInfo.uReason = pVmxTransient->uExitReason;
10130 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10131 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10132 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
10133 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10134 }
10135 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10136}
10137
10138
10139/**
10140 * Nested-guest VM-exit handler for invalid guest state
10141 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10142 */
10143HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10144{
10145 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10146
10147 /*
10148 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10149 * So if it does happen, it most likely indicates a bug in the hardware-assisted VMX code.
10150 * Handle it as if the outer guest were in an invalid guest state.
10151 *
10152 * When the fast path is implemented, this should be changed to cause the corresponding
10153 * nested-guest VM-exit.
10154 */
10155 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10156}
10157
10158
10159/**
10160 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10161 * and only provide the instruction length.
10162 *
10163 * Unconditional VM-exit.
10164 */
10165HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10166{
10167 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10168
10169#ifdef VBOX_STRICT
10170 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10171 switch (pVmxTransient->uExitReason)
10172 {
10173 case VMX_EXIT_ENCLS:
10174 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10175 break;
10176
10177 case VMX_EXIT_VMFUNC:
10178 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10179 break;
10180 }
10181#endif
10182
10183 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10184 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10185}
10186
10187
10188/**
10189 * Nested-guest VM-exit handler for instructions that provide instruction length as
10190 * well as more information.
10191 *
10192 * Unconditional VM-exit.
10193 */
10194HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10195{
10196 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10197
10198#ifdef VBOX_STRICT
10199 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10200 switch (pVmxTransient->uExitReason)
10201 {
10202 case VMX_EXIT_GDTR_IDTR_ACCESS:
10203 case VMX_EXIT_LDTR_TR_ACCESS:
10204 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10205 break;
10206
10207 case VMX_EXIT_RDRAND:
10208 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10209 break;
10210
10211 case VMX_EXIT_RDSEED:
10212 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10213 break;
10214
10215 case VMX_EXIT_XSAVES:
10216 case VMX_EXIT_XRSTORS:
10217 /** @todo NSTVMX: Verify XSS-bitmap. */
10218 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10219 break;
10220
10221 case VMX_EXIT_UMWAIT:
10222 case VMX_EXIT_TPAUSE:
10223 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10224 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10225 break;
10226
10227 case VMX_EXIT_LOADIWKEY:
10228 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10229 break;
10230 }
10231#endif
10232
10233 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10234 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10235 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10236
10237 VMXVEXITINFO ExitInfo;
10238 RT_ZERO(ExitInfo);
10239 ExitInfo.uReason = pVmxTransient->uExitReason;
10240 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10241 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10242 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
10243 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10244}
10245
10246
10247# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10248/**
10249 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10250 * Conditional VM-exit.
10251 */
10252HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10253{
10254 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10255 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10256
10257 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10258 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10259 {
10260 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
10261 AssertRCReturn(rc, rc);
10262
10263 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10264 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10265 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
10266
10267 RTGCPHYS const GCPhysNested = pVmxTransient->uGuestPhysicalAddr;
10268 uint64_t const uExitQual = pVmxTransient->uExitQual;
10269
10270 RTGCPTR GCPtrNested;
10271 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10272 if (fIsLinearAddrValid)
10273 {
10274 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
10275 GCPtrNested = pVmxTransient->uGuestLinearAddr;
10276 }
10277 else
10278 GCPtrNested = 0;
10279
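        /* Fold the EPT-violation exit qualification into x86 #PF-style error code bits
           (instruction fetch, write access, present) for the PGM nested-paging handler below. */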
10280 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10281 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10282 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10283 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10284 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10285
10286 PGMPTWALK Walk;
10287 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10288 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, CPUMCTX2CORE(pCtx), GCPhysNested,
10289 fIsLinearAddrValid, GCPtrNested, &Walk);
10290 if (RT_SUCCESS(rcStrict))
10291 {
10292 if (rcStrict == VINF_SUCCESS)
10293 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
10294 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10295 {
10296 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10297 rcStrict = VINF_SUCCESS;
10298 }
10299 return rcStrict;
10300 }
10301
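        /* PGM could not resolve the access itself: build the event info and reflect either an
           EPT-violation or an EPT-misconfiguration VM-exit to the nested hypervisor, depending
           on why the nested page walk failed. */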
10302 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10303 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10304
10305 VMXVEXITEVENTINFO ExitEventInfo;
10306 RT_ZERO(ExitEventInfo);
10307 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10308 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10309
10310 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10311 {
10312 VMXVEXITINFO ExitInfo;
10313 RT_ZERO(ExitInfo);
10314 ExitInfo.uReason = VMX_EXIT_EPT_VIOLATION;
10315 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10316 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10317 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
10318 ExitInfo.u64GuestPhysAddr = pVmxTransient->uGuestPhysicalAddr;
10319 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10320 }
10321
10322 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10323 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10324 }
10325
10326 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10327}
10328
10329
10330/**
10331 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10332 * Conditional VM-exit.
10333 */
10334HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10335{
10336 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10337 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10338
10339 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10340 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10341 {
10342 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
10343 AssertRCReturn(rc, rc);
10344
10345 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
10346
10347 PGMPTWALK Walk;
10348 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10349 RTGCPHYS const GCPhysNested = pVmxTransient->uGuestPhysicalAddr;
10350 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, CPUMCTX2CORE(pCtx),
10351 GCPhysNested, false /* fIsLinearAddrValid */,
10352 0 /* GCPtrNested*/, &Walk);
10353 if (RT_SUCCESS(rcStrict))
10354 return VINF_EM_RAW_EMULATE_INSTR;
10355
10356 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10357 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10358
10359 VMXVEXITEVENTINFO ExitEventInfo;
10360 RT_ZERO(ExitEventInfo);
10361 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10362 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10363
10364 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10365 }
10366
10367 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
10368}
10369# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10370
10371/** @} */
10372#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10373
10374
10375/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10376 * probes.
10377 *
10378 * The following few functions and the associated structure contain the bloat
10379 * necessary for providing detailed debug events and dtrace probes as well as
10380 * reliable host-side single stepping. This works on the principle of
10381 * "subclassing" the normal execution loop and workers. We replace the loop
10382 * method completely and override selected helpers to add necessary adjustments
10383 * to their core operation.
10384 *
10385 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10386 * any performance for debug and analysis features.
10387 *
10388 * @{
10389 */
10390
10391/**
10392 * Transient per-VCPU debug state of the VMCS and related info which we
10393 * save/restore in the debug run loop.
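 *
 * See vmxHCRunDebugStateInit, vmxHCPreRunGuestDebugStateUpdate,
 * vmxHCPreRunGuestDebugStateApply and vmxHCRunDebugStateRevert below for how
 * this state is filled in, pushed into the VMCS and later reverted.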
10394 */
10395typedef struct VMXRUNDBGSTATE
10396{
10397 /** The RIP we started executing at. This is for detecting that we stepped. */
10398 uint64_t uRipStart;
10399 /** The CS we started executing with. */
10400 uint16_t uCsStart;
10401
10402 /** Whether we've actually modified the 1st execution control field. */
10403 bool fModifiedProcCtls : 1;
10404 /** Whether we've actually modified the 2nd execution control field. */
10405 bool fModifiedProcCtls2 : 1;
10406 /** Whether we've actually modified the exception bitmap. */
10407 bool fModifiedXcptBitmap : 1;
10408
10409 /** Whether we want the CR0 mask to be cleared. */
10410 bool fClearCr0Mask : 1;
10411 /** Whether we want the CR4 mask to be cleared. */
10412 bool fClearCr4Mask : 1;
10413 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10414 uint32_t fCpe1Extra;
10415 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10416 uint32_t fCpe1Unwanted;
10417 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10418 uint32_t fCpe2Extra;
10419 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10420 uint32_t bmXcptExtra;
10421 /** The sequence number of the Dtrace provider settings the state was
10422 * configured against. */
10423 uint32_t uDtraceSettingsSeqNo;
10424 /** VM-exits to check (one bit per VM-exit). */
10425 uint32_t bmExitsToCheck[3];
10426
10427 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10428 uint32_t fProcCtlsInitial;
10429 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10430 uint32_t fProcCtls2Initial;
10431 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10432 uint32_t bmXcptInitial;
10433} VMXRUNDBGSTATE;
10434AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
10435typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
10436
10437
10438/**
10439 * Initializes the VMXRUNDBGSTATE structure.
10440 *
10441 * @param pVCpu The cross context virtual CPU structure of the
10442 * calling EMT.
10443 * @param pVmxTransient The VMX-transient structure.
10444 * @param pDbgState The debug state to initialize.
10445 */
10446static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10447{
10448 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
10449 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
10450
10451 pDbgState->fModifiedProcCtls = false;
10452 pDbgState->fModifiedProcCtls2 = false;
10453 pDbgState->fModifiedXcptBitmap = false;
10454 pDbgState->fClearCr0Mask = false;
10455 pDbgState->fClearCr4Mask = false;
10456 pDbgState->fCpe1Extra = 0;
10457 pDbgState->fCpe1Unwanted = 0;
10458 pDbgState->fCpe2Extra = 0;
10459 pDbgState->bmXcptExtra = 0;
10460 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
10461 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
10462 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
10463}
10464
10465
10466/**
10467 * Updates the VMCS fields with changes requested by @a pDbgState.
10468 *
10469 * This is performed after hmR0VmxPreRunGuestDebugStateUpdate as well as
10470 * immediately before executing guest code, i.e. when interrupts are disabled.
10471 * We don't check status codes here as we cannot easily assert or return in the
10472 * latter case.
10473 *
10474 * @param pVCpu The cross context virtual CPU structure.
10475 * @param pVmxTransient The VMX-transient structure.
10476 * @param pDbgState The debug state.
10477 */
10478static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10479{
10480 /*
10481 * Ensure desired flags in VMCS control fields are set.
10482 * (Ignoring write failure here, as we're committed and it's just debug extras.)
10483 *
10484 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
10485 * there should be no stale data in pCtx at this point.
10486 */
10487 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10488 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
10489 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
10490 {
10491 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
10492 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
10493 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
10494 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
10495 pDbgState->fModifiedProcCtls = true;
10496 }
10497
10498 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
10499 {
10500 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
10501 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
10502 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
10503 pDbgState->fModifiedProcCtls2 = true;
10504 }
10505
10506 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
10507 {
10508 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
10509 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
10510 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
10511 pDbgState->fModifiedXcptBitmap = true;
10512 }
10513
10514 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
10515 {
10516 pVmcsInfo->u64Cr0Mask = 0;
10517 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
10518 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
10519 }
10520
10521 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
10522 {
10523 pVmcsInfo->u64Cr4Mask = 0;
10524 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
10525 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
10526 }
10527
10528 NOREF(pVCpu);
10529}
10530
10531
10532/**
10533 * Restores VMCS fields that were changed by hmR0VmxPreRunGuestDebugStateApply for
10534 * re-entry next time around.
10535 *
10536 * @returns Strict VBox status code (i.e. informational status codes too).
10537 * @param pVCpu The cross context virtual CPU structure.
10538 * @param pVmxTransient The VMX-transient structure.
10539 * @param pDbgState The debug state.
10540 * @param rcStrict The return code from executing the guest using single
10541 * stepping.
10542 */
10543static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
10544 VBOXSTRICTRC rcStrict)
10545{
10546 /*
10547 * Restore VM-exit control settings as we may not reenter this function the
10548 * next time around.
10549 */
10550 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10551
10552 /* We reload the initial value and trigger whatever recalculations we can the
10553 next time around. From the looks of things, that's all that's required atm. */
10554 if (pDbgState->fModifiedProcCtls)
10555 {
10556 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
10557 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
10558 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
10559 AssertRC(rc2);
10560 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
10561 }
10562
10563 /* We're currently the only ones messing with this one, so just restore the
10564 cached value and reload the field. */
10565 if ( pDbgState->fModifiedProcCtls2
10566 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
10567 {
10568 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
10569 AssertRC(rc2);
10570 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
10571 }
10572
10573 /* If we've modified the exception bitmap, we restore it and trigger
10574 reloading and partial recalculation the next time around. */
10575 if (pDbgState->fModifiedXcptBitmap)
10576 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
10577
10578 return rcStrict;
10579}
10580
10581
10582/**
10583 * Configures VM-exit controls for current DBGF and DTrace settings.
10584 *
10585 * This updates @a pDbgState and the VMCS execution control fields to reflect
10586 * the necessary VM-exits demanded by DBGF and DTrace.
10587 *
10588 * @param pVCpu The cross context virtual CPU structure.
10589 * @param pVmxTransient The VMX-transient structure. May update
10590 * fUpdatedTscOffsettingAndPreemptTimer.
10591 * @param pDbgState The debug state.
10592 */
10593static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10594{
10595#ifndef IN_NEM_DARWIN
10596 /*
10597 * Take down the dtrace serial number so we can spot changes.
10598 */
10599 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
10600 ASMCompilerBarrier();
10601#endif
10602
10603 /*
10604 * We'll rebuild most of the middle block of data members (holding the
10605 * current settings) as we go along here, so start by clearing it all.
10606 */
10607 pDbgState->bmXcptExtra = 0;
10608 pDbgState->fCpe1Extra = 0;
10609 pDbgState->fCpe1Unwanted = 0;
10610 pDbgState->fCpe2Extra = 0;
10611 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
10612 pDbgState->bmExitsToCheck[i] = 0;
10613
10614 /*
10615 * Software interrupts (INT XXh) - no idea how to trigger these...
10616 */
10617 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10618 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
10619 || VBOXVMM_INT_SOFTWARE_ENABLED())
10620 {
10621 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
10622 }
10623
10624 /*
10625 * INT3 breakpoints - triggered by #BP exceptions.
10626 */
10627 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
10628 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
10629
10630 /*
10631 * Exception bitmap and XCPT events+probes.
10632 */
10633 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
10634 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
10635 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
10636
10637 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
10638 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
10639 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
10640 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
10641 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
10642 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
10643 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
10644 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
10645 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
10646 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
10647 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
10648 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
10649 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
10650 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
10651 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
10652 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
10653 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
10654 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
10655
10656 if (pDbgState->bmXcptExtra)
10657 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
10658
10659 /*
10660 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
10661 *
10662 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
10663 * So, when adding/changing/removing please don't forget to update it.
10664 *
10665 * Some of the macros are picking up local variables to save horizontal space
10666 * (being able to see it in a table is the lesser evil here).
10667 */
10668#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
10669 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
10670 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
10671#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
10672 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10673 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10674 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10675 } else do { } while (0)
10676#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
10677 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10678 { \
10679 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
10680 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10681 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10682 } else do { } while (0)
10683#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
10684 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10685 { \
10686 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
10687 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10688 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10689 } else do { } while (0)
10690#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
10691 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10692 { \
10693 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
10694 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10695 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10696 } else do { } while (0)
10697
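    /* For example, SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT)
       checks the DBGFEVENT_INSTR_MWAIT event and the VBOXVMM_INSTR_MWAIT dtrace probe; if either
       is enabled, it requests MWAIT-exiting via fCpe1Extra and marks VMX_EXIT_MWAIT in
       bmExitsToCheck. */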
10698 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
10699 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
10700 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
10701 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
10702 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
10703
10704 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
10705 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
10706 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
10707 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
10708 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
10709 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
10710 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
10711 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
10712 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
10713 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
10714 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
10715 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
10716 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
10717 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
10718 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
10719 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
10720 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
10721 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
10722 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
10723 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
10724 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
10725 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
10726 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
10727 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
10728 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
10729 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
10730 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
10731 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
10732 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
10733 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
10734 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
10735 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
10736 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
10737 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
10738 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
10739 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
10740
10741 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
10742 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
10743 {
10744 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4
10745 | CPUMCTX_EXTRN_APIC_TPR);
10746 AssertRC(rc);
10747
10748#if 0 /** @todo fix me */
10749 pDbgState->fClearCr0Mask = true;
10750 pDbgState->fClearCr4Mask = true;
10751#endif
10752 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
10753 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
10754 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
10755 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
10756 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
10757 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
10758 require clearing here and in the loop if we start using it. */
10759 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
10760 }
10761 else
10762 {
10763 if (pDbgState->fClearCr0Mask)
10764 {
10765 pDbgState->fClearCr0Mask = false;
10766 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
10767 }
10768 if (pDbgState->fClearCr4Mask)
10769 {
10770 pDbgState->fClearCr4Mask = false;
10771 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
10772 }
10773 }
10774 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
10775 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
10776
10777 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
10778 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
10779 {
10780 /** @todo later, need to fix handler as it assumes this won't usually happen. */
10781 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
10782 }
10783 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
10784 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
10785
10786 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
10787 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
10788 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
10789 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
10790 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
10791 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
10792 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
10793 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
10794#if 0 /** @todo too slow, fix handler. */
10795 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
10796#endif
10797 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
10798
10799 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
10800 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
10801 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
10802 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
10803 {
10804 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
10805 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
10806 }
10807 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10808 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10809 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10810 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10811
10812 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
10813 || IS_EITHER_ENABLED(pVM, INSTR_STR)
10814 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
10815 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
10816 {
10817 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
10818 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
10819 }
10820 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
10821 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
10822 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
10823 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
10824
10825 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
10826 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
10827 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
10828 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
10829 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
10830 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
10831 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
10832 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
10833 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
10834 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
10835 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
10836 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
10837 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
10838 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
10839 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
10840 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
10841 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
10842 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
10843 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
10844 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
10845 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
10846 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
10847
10848#undef IS_EITHER_ENABLED
10849#undef SET_ONLY_XBM_IF_EITHER_EN
10850#undef SET_CPE1_XBM_IF_EITHER_EN
10851#undef SET_CPEU_XBM_IF_EITHER_EN
10852#undef SET_CPE2_XBM_IF_EITHER_EN
10853
10854 /*
10855 * Sanitize the requested execution controls against what the CPU actually supports.
10856 */
10857 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
10858 if (pDbgState->fCpe2Extra)
10859 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
10860 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
10861 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
10862#ifndef IN_NEM_DARWIN
10863 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
10864 {
10865 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
10866 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
10867 }
10868#else
10869 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
10870 {
10871 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
10872 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
10873 }
10874#endif
10875
10876 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
10877 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
10878 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
10879 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
10880}
10881
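/*
 * Rough usage sketch of the debug-state helpers above (for orientation only; the real
 * debug run loop that wires these together is not part of this excerpt and does more):
 *
 *     VMXRUNDBGSTATE DbgState;
 *     vmxHCRunDebugStateInit(pVCpu, pVmxTransient, &DbgState);
 *     vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState);
 *     for (;;)
 *     {
 *         vmxHCPreRunGuestDebugStateApply(pVCpu, pVmxTransient, &DbgState);
 *         ... execute the guest, fetch the VM-exit reason into uExitReason ...
 *         if (ASMBitTest(DbgState.bmExitsToCheck, uExitReason))
 *             rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
 *         ... normal VM-exit handling, loop exit conditions, etc. ...
 *     }
 *     rcStrict = vmxHCRunDebugStateRevert(pVCpu, pVmxTransient, &DbgState, rcStrict);
 */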
10882
10883/**
10884 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
10885 * appropriate.
10886 *
10887 * The caller has already checked the VM-exit against the
10888 * VMXRUNDBGSTATE::bmExitsToCheck bitmap and has also checked for NMIs,
10889 * so we don't need to do either of those here.
10890 *
10891 * @returns Strict VBox status code (i.e. informational status codes too).
10892 * @param pVCpu The cross context virtual CPU structure.
10893 * @param pVmxTransient The VMX-transient structure.
10894 * @param uExitReason The VM-exit reason.
10895 *
10896 * @remarks The name of this function is displayed by dtrace, so keep it short
10897 * and to the point. No longer than 33 chars, please.
10898 */
10899static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
10900{
10901 /*
10902 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
10903 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
10904 *
10905 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
10906 * does. Additions, changes and removals must be made in both places. Same ordering, please.
10907 *
10908 * Added/removed events must also be reflected in the next section
10909 * where we dispatch dtrace events.
10910 */
10911 bool fDtrace1 = false;
10912 bool fDtrace2 = false;
10913 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
10914 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
10915 uint32_t uEventArg = 0;
10916#define SET_EXIT(a_EventSubName) \
10917 do { \
10918 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
10919 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
10920 } while (0)
10921#define SET_BOTH(a_EventSubName) \
10922 do { \
10923 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
10924 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
10925 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
10926 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
10927 } while (0)
10928 switch (uExitReason)
10929 {
10930 case VMX_EXIT_MTF:
10931 return vmxHCExitMtf(pVCpu, pVmxTransient);
10932
10933 case VMX_EXIT_XCPT_OR_NMI:
10934 {
10935 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
10936 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
10937 {
10938 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
10939 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
10940 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
10941 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
10942 {
10943 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
10944 {
10945 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
10946 uEventArg = pVmxTransient->uExitIntErrorCode;
10947 }
10948 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
10949 switch (enmEvent1)
10950 {
10951 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
10952 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
10953 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
10954 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
10955 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
10956 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
10957 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
10958 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
10959 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
10960 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
10961 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
10962 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
10963 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
10964 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
10965 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
10966 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
10967 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
10968 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
10969 default: break;
10970 }
10971 }
10972 else
10973 AssertFailed();
10974 break;
10975
10976 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10977 uEventArg = idxVector;
10978 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
10979 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
10980 break;
10981 }
10982 break;
10983 }
10984
10985 case VMX_EXIT_TRIPLE_FAULT:
10986 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
10987 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
10988 break;
10989 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
10990 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
10991 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
10992 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
10993 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
10994
10995 /* Instruction specific VM-exits: */
10996 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
10997 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
10998 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
10999 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11000 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11001 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11002 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11003 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11004 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11005 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11006 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11007 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11008 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11009 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11010 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11011 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11012 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11013 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11014 case VMX_EXIT_MOV_CRX:
11015 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11016 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11017 SET_BOTH(CRX_READ);
11018 else
11019 SET_BOTH(CRX_WRITE);
11020 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11021 break;
11022 case VMX_EXIT_MOV_DRX:
11023 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11024 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11025 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11026 SET_BOTH(DRX_READ);
11027 else
11028 SET_BOTH(DRX_WRITE);
11029 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11030 break;
11031 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11032 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11033 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11034 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11035 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11036 case VMX_EXIT_GDTR_IDTR_ACCESS:
11037 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
11038 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11039 {
11040 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11041 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11042 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11043 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11044 }
11045 break;
11046
11047 case VMX_EXIT_LDTR_TR_ACCESS:
11048 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
11049 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11050 {
11051 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11052 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11053 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11054 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11055 }
11056 break;
11057
11058 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11059 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11060 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11061 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11062 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11063 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11064 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11065 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11066 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11067 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11068 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11069
11070 /* Events that aren't relevant at this point. */
11071 case VMX_EXIT_EXT_INT:
11072 case VMX_EXIT_INT_WINDOW:
11073 case VMX_EXIT_NMI_WINDOW:
11074 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11075 case VMX_EXIT_PREEMPT_TIMER:
11076 case VMX_EXIT_IO_INSTR:
11077 break;
11078
11079 /* Errors and unexpected events. */
11080 case VMX_EXIT_INIT_SIGNAL:
11081 case VMX_EXIT_SIPI:
11082 case VMX_EXIT_IO_SMI:
11083 case VMX_EXIT_SMI:
11084 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11085 case VMX_EXIT_ERR_MSR_LOAD:
11086 case VMX_EXIT_ERR_MACHINE_CHECK:
11087 case VMX_EXIT_PML_FULL:
11088 case VMX_EXIT_VIRTUALIZED_EOI:
11089 break;
11090
11091 default:
11092 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11093 break;
11094 }
11095#undef SET_BOTH
11096#undef SET_EXIT
11097
11098 /*
11099 * Dtrace tracepoints go first. We do them here at once so we don't
11100 * have to repeat the guest-state saving and related code a few dozen times.
11101 * The downside is that we've got to repeat the switch, though this time
11102 * we use enmEvent since the probes are a subset of what DBGF does.
11103 */
11104 if (fDtrace1 || fDtrace2)
11105 {
11106 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11107 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11108 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11109 switch (enmEvent1)
11110 {
11111 /** @todo consider which extra parameters would be helpful for each probe. */
11112 case DBGFEVENT_END: break;
11113 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11114 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11115 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11116 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11117 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11118 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11119 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11120 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11121 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11122 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11123 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11124 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11125 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11126 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11127 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11128 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11129 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11130 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11131 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11132 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11133 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11134 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11135 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11136 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11137 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11138 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11139 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11140 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11141 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11142 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11143 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11144 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11145 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11146 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11147 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11148 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11149 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11150 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11151 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11152 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11153 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11154 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11155 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11156 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11157 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11158 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11159 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11160 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11161 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11162 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11163 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11164 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11165 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11166 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11167 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11168 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11169 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11170 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11171 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11172 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11173 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11174 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11175 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11176 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11177 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11178 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11179 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11180 }
11181 switch (enmEvent2)
11182 {
11183 /** @todo consider which extra parameters would be helpful for each probe. */
11184 case DBGFEVENT_END: break;
11185 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11186 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11187 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11188 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11189 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11190 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11191 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11192 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11193 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11194 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11195 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11196 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11197 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11198 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11199 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11200 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11201 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11202 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11203 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11204 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11205 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11206 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11207 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11208 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11209 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11210 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11211 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11212 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11213 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11214 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11215 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11216 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11217 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11218 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11219 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11220 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11221 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11222 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11223 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11224 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11225 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11226 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11227 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11228 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11229 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11230 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11231 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11232 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11233 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11234 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11235 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11236 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11237 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11238 }
11239 }
11240
11241 /*
11242 * Fire off the DBGF event, if enabled (our check here is just a quick one,
11243 * the DBGF call will do a full check).
11244 *
11245 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11246 * Note! If we have two events, we prioritize the first, i.e. the instruction
11247 * one, in order to avoid event nesting.
11248 */
11249 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11250 if ( enmEvent1 != DBGFEVENT_END
11251 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11252 {
11253 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11254 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11255 if (rcStrict != VINF_SUCCESS)
11256 return rcStrict;
11257 }
11258 else if ( enmEvent2 != DBGFEVENT_END
11259 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11260 {
11261 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11262 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11263 if (rcStrict != VINF_SUCCESS)
11264 return rcStrict;
11265 }
11266
11267 return VINF_SUCCESS;
11268}
11269
11270
11271/**
11272 * Single-stepping VM-exit filtering.
11273 *
11274 * This preprocesses the VM-exits and decides whether we've gotten far enough
11275 * to return VINF_EM_DBG_STEPPED already; if not, normal VM-exit handling is
11276 * performed.
11277 *
11278 * @returns Strict VBox status code (i.e. informational status codes too).
11279 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11280 * @param pVmxTransient The VMX-transient structure.
11281 * @param pDbgState The debug state.
11282 */
11283DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11284{
11285 /*
11286 * Generic dtrace VM-exit probe; expensive because it has to import the entire guest context.
11287 */
11288 uint32_t const uExitReason = pVmxTransient->uExitReason;
11289 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11290 { /* more likely */ }
11291 else
11292 {
11293 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11294 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11295 AssertRC(rc);
11296 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11297 }
11298
11299#ifndef IN_NEM_DARWIN
11300 /*
11301 * Check for host NMI, just to get that out of the way.
11302 */
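    /* Only exits whose interruption type is NMI are handled as host NMIs here;
       anything else on VMX_EXIT_XCPT_OR_NMI falls through to the filtering below. */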
11303 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11304 { /* normally likely */ }
11305 else
11306 {
11307 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
11308 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11309 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11310 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11311 }
11312#endif
11313
11314 /*
11315 * Check for a single-stepping event if we're stepping.
11316 */
11317 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11318 {
11319 switch (uExitReason)
11320 {
11321 case VMX_EXIT_MTF:
11322 return vmxHCExitMtf(pVCpu, pVmxTransient);
11323
11324 /* Various events: */
11325 case VMX_EXIT_XCPT_OR_NMI:
11326 case VMX_EXIT_EXT_INT:
11327 case VMX_EXIT_TRIPLE_FAULT:
11328 case VMX_EXIT_INT_WINDOW:
11329 case VMX_EXIT_NMI_WINDOW:
11330 case VMX_EXIT_TASK_SWITCH:
11331 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11332 case VMX_EXIT_APIC_ACCESS:
11333 case VMX_EXIT_EPT_VIOLATION:
11334 case VMX_EXIT_EPT_MISCONFIG:
11335 case VMX_EXIT_PREEMPT_TIMER:
11336
11337 /* Instruction specific VM-exits: */
11338 case VMX_EXIT_CPUID:
11339 case VMX_EXIT_GETSEC:
11340 case VMX_EXIT_HLT:
11341 case VMX_EXIT_INVD:
11342 case VMX_EXIT_INVLPG:
11343 case VMX_EXIT_RDPMC:
11344 case VMX_EXIT_RDTSC:
11345 case VMX_EXIT_RSM:
11346 case VMX_EXIT_VMCALL:
11347 case VMX_EXIT_VMCLEAR:
11348 case VMX_EXIT_VMLAUNCH:
11349 case VMX_EXIT_VMPTRLD:
11350 case VMX_EXIT_VMPTRST:
11351 case VMX_EXIT_VMREAD:
11352 case VMX_EXIT_VMRESUME:
11353 case VMX_EXIT_VMWRITE:
11354 case VMX_EXIT_VMXOFF:
11355 case VMX_EXIT_VMXON:
11356 case VMX_EXIT_MOV_CRX:
11357 case VMX_EXIT_MOV_DRX:
11358 case VMX_EXIT_IO_INSTR:
11359 case VMX_EXIT_RDMSR:
11360 case VMX_EXIT_WRMSR:
11361 case VMX_EXIT_MWAIT:
11362 case VMX_EXIT_MONITOR:
11363 case VMX_EXIT_PAUSE:
11364 case VMX_EXIT_GDTR_IDTR_ACCESS:
11365 case VMX_EXIT_LDTR_TR_ACCESS:
11366 case VMX_EXIT_INVEPT:
11367 case VMX_EXIT_RDTSCP:
11368 case VMX_EXIT_INVVPID:
11369 case VMX_EXIT_WBINVD:
11370 case VMX_EXIT_XSETBV:
11371 case VMX_EXIT_RDRAND:
11372 case VMX_EXIT_INVPCID:
11373 case VMX_EXIT_VMFUNC:
11374 case VMX_EXIT_RDSEED:
11375 case VMX_EXIT_XSAVES:
11376 case VMX_EXIT_XRSTORS:
11377 {
11378 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11379 AssertRCReturn(rc, rc);
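                /* The step is considered complete once RIP or CS differs from where stepping started. */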
11380 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11381 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11382 return VINF_EM_DBG_STEPPED;
11383 break;
11384 }
11385
11386 /* Errors and unexpected events: */
11387 case VMX_EXIT_INIT_SIGNAL:
11388 case VMX_EXIT_SIPI:
11389 case VMX_EXIT_IO_SMI:
11390 case VMX_EXIT_SMI:
11391 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11392 case VMX_EXIT_ERR_MSR_LOAD:
11393 case VMX_EXIT_ERR_MACHINE_CHECK:
11394 case VMX_EXIT_PML_FULL:
11395 case VMX_EXIT_VIRTUALIZED_EOI:
11396 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault-like, so I guess we must process it? */
11397 break;
11398
11399 default:
11400 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11401 break;
11402 }
11403 }
11404
11405 /*
11406 * Check for debugger event breakpoints and dtrace probes.
11407 */
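    /* bmExitsToCheck has one bit per VM-exit reason (32 bits per array element); a set bit
       means a DBGF event or dtrace probe is armed for that exit reason. */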
11408 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11409 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11410 {
11411 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11412 if (rcStrict != VINF_SUCCESS)
11413 return rcStrict;
11414 }
11415
11416 /*
11417 * Normal processing.
11418 */
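    /* With HMVMX_USE_FUNCTION_TABLE the exit reason indexes directly into the g_aVMExitHandlers
       table; otherwise the switch-based vmxHCHandleExit dispatcher is used. */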
11419#ifdef HMVMX_USE_FUNCTION_TABLE
11420 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11421#else
11422 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11423#endif
11424}
11425
11426/** @} */