VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@94620

Last change on this file: r94620, checked in by vboxsync, 3 years ago

VMM,Doxyfile.Core: Doxygen fixes for .cpp.h files.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 490.2 KB
1/* $Id: VMXAllTemplate.cpp.h 94620 2022-04-15 22:05:22Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Defined Constants And Macros *
21*********************************************************************************************************************************/
22#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
23# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
24#endif
25
26
27#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
28# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
29#endif
30
31
32/** Use the function table. */
33#define HMVMX_USE_FUNCTION_TABLE
34
35/** Determine which tagged-TLB flush handler to use. */
36#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
37#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
38#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
39#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
40
41/** Assert that all the given fields have been read from the VMCS. */
42#ifdef VBOX_STRICT
43# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
44 do { \
45 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
46 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
47 } while (0)
48#else
49# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
50#endif
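
/*
 * Illustrative usage (editorial sketch, not part of the original file): an exit
 * handler that consumes the exit qualification would typically assert that the
 * field has been read into the transient structure first, e.g.:
 *
 *     vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
 *     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
 *
 * Both names are defined/used later in this file; the exact pairing shown here
 * is an assumption about typical call sites.
 */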
51
52/**
53 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
54 * guest using hardware-assisted VMX.
55 *
56 * This excludes state like GPRs (other than RSP) which are always swapped
57 * and restored across the world-switch, and also MSRs like EFER which cannot
58 * be modified by the guest without causing a VM-exit.
59 */
60#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
61 | CPUMCTX_EXTRN_RFLAGS \
62 | CPUMCTX_EXTRN_RSP \
63 | CPUMCTX_EXTRN_SREG_MASK \
64 | CPUMCTX_EXTRN_TABLE_MASK \
65 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
66 | CPUMCTX_EXTRN_SYSCALL_MSRS \
67 | CPUMCTX_EXTRN_SYSENTER_MSRS \
68 | CPUMCTX_EXTRN_TSC_AUX \
69 | CPUMCTX_EXTRN_OTHER_MSRS \
70 | CPUMCTX_EXTRN_CR0 \
71 | CPUMCTX_EXTRN_CR3 \
72 | CPUMCTX_EXTRN_CR4 \
73 | CPUMCTX_EXTRN_DR7 \
74 | CPUMCTX_EXTRN_HWVIRT \
75 | CPUMCTX_EXTRN_INHIBIT_INT \
76 | CPUMCTX_EXTRN_INHIBIT_NMI)
77
78/**
79 * Exception bitmap mask for real-mode guests (real-on-v86).
80 *
81 * We need to intercept all exceptions manually except:
82 * - \#AC and \#DB are always intercepted to prevent the CPU from deadlocking
83 * due to bugs in Intel CPUs.
84 * - \#PF need not be intercepted even in real-mode if we have nested paging
85 * support.
86 */
87#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
88 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
89 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
90 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
91 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
92 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
93 | RT_BIT(X86_XCPT_XF))
94
95/** Maximum VM-instruction error number. */
96#define HMVMX_INSTR_ERROR_MAX 28
97
98/** Profiling macro. */
99#ifdef HM_PROFILE_EXIT_DISPATCH
100# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
101# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
102#else
103# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
104# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
105#endif
106
107#ifndef IN_NEM_DARWIN
108/** Assert that preemption is disabled or covered by thread-context hooks. */
109# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
110 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
111
112/** Assert that we haven't migrated CPUs when thread-context hooks are not
113 * used. */
114# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
115 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
116 ("Illegal migration! Entered on CPU %u Current %u\n", \
117 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
118#else
119# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
120# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
121#endif
122
123/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
124 * context. */
125#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
126 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
127 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
128
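/*
 * Illustrative usage (editorial sketch, not part of the original file): a handler
 * that touches CR0 would assert that the register has been imported from the
 * VMCS into the guest-CPU context first, e.g.:
 *
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
 *
 * CPUMCTX_EXTRN_CR0 is one of the flags listed in HMVMX_CPUMCTX_EXTRN_ALL above;
 * the exact call site shown is an assumption.
 */
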
129/** Log the VM-exit reason with an easily visible marker to identify it in a
130 * potential sea of logging data. */
131#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
132 do { \
133 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
134 HMGetVmxExitName(a_uExitReason))); \
135 } while (0) \
136
137
138/*********************************************************************************************************************************
139* Structures and Typedefs *
140*********************************************************************************************************************************/
141/**
142 * Memory operand read or write access.
143 */
144typedef enum VMXMEMACCESS
145{
146 VMXMEMACCESS_READ = 0,
147 VMXMEMACCESS_WRITE = 1
148} VMXMEMACCESS;
149
150
151/**
152 * VMX VM-exit handler.
153 *
154 * @returns Strict VBox status code (i.e. informational status codes too).
155 * @param pVCpu The cross context virtual CPU structure.
156 * @param pVmxTransient The VMX-transient structure.
157 */
158#ifndef HMVMX_USE_FUNCTION_TABLE
159typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
160#else
161typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
162/** Pointer to VM-exit handler. */
163typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
164#endif
165
166/**
167 * VMX VM-exit handler, non-strict status code.
168 *
169 * This is generally the same as FNVMXEXITHANDLER; the NSRC in the name is just FYI.
170 *
171 * @returns VBox status code, no informational status code returned.
172 * @param pVCpu The cross context virtual CPU structure.
173 * @param pVmxTransient The VMX-transient structure.
174 *
175 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
176 * use of that status code will be replaced with VINF_EM_SOMETHING
177 * later when switching over to IEM.
178 */
179#ifndef HMVMX_USE_FUNCTION_TABLE
180typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
181#else
182typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
183#endif
184
185
186/*********************************************************************************************************************************
187* Internal Functions *
188*********************************************************************************************************************************/
189#ifndef HMVMX_USE_FUNCTION_TABLE
190DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
191# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
192# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
193#else
194# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
195# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
196#endif
197#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
198DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
199#endif
200
201static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
202
203/** @name VM-exit handler prototypes.
204 * @{
205 */
206static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
207static FNVMXEXITHANDLER vmxHCExitExtInt;
208static FNVMXEXITHANDLER vmxHCExitTripleFault;
209static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
210static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
211static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
212static FNVMXEXITHANDLER vmxHCExitCpuid;
213static FNVMXEXITHANDLER vmxHCExitGetsec;
214static FNVMXEXITHANDLER vmxHCExitHlt;
215static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
216static FNVMXEXITHANDLER vmxHCExitInvlpg;
217static FNVMXEXITHANDLER vmxHCExitRdpmc;
218static FNVMXEXITHANDLER vmxHCExitVmcall;
219#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
220static FNVMXEXITHANDLER vmxHCExitVmclear;
221static FNVMXEXITHANDLER vmxHCExitVmlaunch;
222static FNVMXEXITHANDLER vmxHCExitVmptrld;
223static FNVMXEXITHANDLER vmxHCExitVmptrst;
224static FNVMXEXITHANDLER vmxHCExitVmread;
225static FNVMXEXITHANDLER vmxHCExitVmresume;
226static FNVMXEXITHANDLER vmxHCExitVmwrite;
227static FNVMXEXITHANDLER vmxHCExitVmxoff;
228static FNVMXEXITHANDLER vmxHCExitVmxon;
229static FNVMXEXITHANDLER vmxHCExitInvvpid;
230# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
231static FNVMXEXITHANDLER vmxHCExitInvept;
232# endif
233#endif
234static FNVMXEXITHANDLER vmxHCExitRdtsc;
235static FNVMXEXITHANDLER vmxHCExitMovCRx;
236static FNVMXEXITHANDLER vmxHCExitMovDRx;
237static FNVMXEXITHANDLER vmxHCExitIoInstr;
238static FNVMXEXITHANDLER vmxHCExitRdmsr;
239static FNVMXEXITHANDLER vmxHCExitWrmsr;
240static FNVMXEXITHANDLER vmxHCExitMwait;
241static FNVMXEXITHANDLER vmxHCExitMtf;
242static FNVMXEXITHANDLER vmxHCExitMonitor;
243static FNVMXEXITHANDLER vmxHCExitPause;
244static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
245static FNVMXEXITHANDLER vmxHCExitApicAccess;
246static FNVMXEXITHANDLER vmxHCExitEptViolation;
247static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
248static FNVMXEXITHANDLER vmxHCExitRdtscp;
249static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
250static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
251static FNVMXEXITHANDLER vmxHCExitXsetbv;
252static FNVMXEXITHANDLER vmxHCExitInvpcid;
253static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
254static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
255static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
256/** @} */
257
258#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
259/** @name Nested-guest VM-exit handler prototypes.
260 * @{
261 */
262static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
263static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
264static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
265static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
266static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
267static FNVMXEXITHANDLER vmxHCExitHltNested;
268static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
269static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
270static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
271static FNVMXEXITHANDLER vmxHCExitRdtscNested;
272static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
273static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
274static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
275static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
276static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
277static FNVMXEXITHANDLER vmxHCExitMwaitNested;
278static FNVMXEXITHANDLER vmxHCExitMtfNested;
279static FNVMXEXITHANDLER vmxHCExitMonitorNested;
280static FNVMXEXITHANDLER vmxHCExitPauseNested;
281static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
282static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
283static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
284static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
285static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
286static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
287static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
288static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
289static FNVMXEXITHANDLER vmxHCExitInstrNested;
290static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
291# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
292static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
293static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
294# endif
295/** @} */
296#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
297
298
299/*********************************************************************************************************************************
300* Global Variables *
301*********************************************************************************************************************************/
302#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
303/**
304 * Array of all VMCS fields.
305 * Any fields added to the VT-x spec. should be added here.
306 *
307 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
308 * of nested-guests.
309 */
310static const uint32_t g_aVmcsFields[] =
311{
312 /* 16-bit control fields. */
313 VMX_VMCS16_VPID,
314 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
315 VMX_VMCS16_EPTP_INDEX,
316
317 /* 16-bit guest-state fields. */
318 VMX_VMCS16_GUEST_ES_SEL,
319 VMX_VMCS16_GUEST_CS_SEL,
320 VMX_VMCS16_GUEST_SS_SEL,
321 VMX_VMCS16_GUEST_DS_SEL,
322 VMX_VMCS16_GUEST_FS_SEL,
323 VMX_VMCS16_GUEST_GS_SEL,
324 VMX_VMCS16_GUEST_LDTR_SEL,
325 VMX_VMCS16_GUEST_TR_SEL,
326 VMX_VMCS16_GUEST_INTR_STATUS,
327 VMX_VMCS16_GUEST_PML_INDEX,
328
329 /* 16-bit host-state fields. */
330 VMX_VMCS16_HOST_ES_SEL,
331 VMX_VMCS16_HOST_CS_SEL,
332 VMX_VMCS16_HOST_SS_SEL,
333 VMX_VMCS16_HOST_DS_SEL,
334 VMX_VMCS16_HOST_FS_SEL,
335 VMX_VMCS16_HOST_GS_SEL,
336 VMX_VMCS16_HOST_TR_SEL,
337
338 /* 64-bit control fields. */
339 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
340 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
341 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
342 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
343 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
344 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
345 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
346 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
347 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
348 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
349 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
350 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
351 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
352 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
353 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
354 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
355 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
356 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
357 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
358 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
359 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
360 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
361 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
362 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
363 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
364 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
365 VMX_VMCS64_CTRL_EPTP_FULL,
366 VMX_VMCS64_CTRL_EPTP_HIGH,
367 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
368 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
369 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
370 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
371 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
372 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
373 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
374 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
375 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
376 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
377 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
378 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
379 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
380 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
381 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
382 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
383 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
384 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
385 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
386 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
387 VMX_VMCS64_CTRL_SPPTP_FULL,
388 VMX_VMCS64_CTRL_SPPTP_HIGH,
389 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
390 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
391 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
392 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
393 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
394 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
395
396 /* 64-bit read-only data fields. */
397 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
398 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
399
400 /* 64-bit guest-state fields. */
401 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
402 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
403 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
404 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
405 VMX_VMCS64_GUEST_PAT_FULL,
406 VMX_VMCS64_GUEST_PAT_HIGH,
407 VMX_VMCS64_GUEST_EFER_FULL,
408 VMX_VMCS64_GUEST_EFER_HIGH,
409 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
410 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
411 VMX_VMCS64_GUEST_PDPTE0_FULL,
412 VMX_VMCS64_GUEST_PDPTE0_HIGH,
413 VMX_VMCS64_GUEST_PDPTE1_FULL,
414 VMX_VMCS64_GUEST_PDPTE1_HIGH,
415 VMX_VMCS64_GUEST_PDPTE2_FULL,
416 VMX_VMCS64_GUEST_PDPTE2_HIGH,
417 VMX_VMCS64_GUEST_PDPTE3_FULL,
418 VMX_VMCS64_GUEST_PDPTE3_HIGH,
419 VMX_VMCS64_GUEST_BNDCFGS_FULL,
420 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
421 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
422 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
423 VMX_VMCS64_GUEST_PKRS_FULL,
424 VMX_VMCS64_GUEST_PKRS_HIGH,
425
426 /* 64-bit host-state fields. */
427 VMX_VMCS64_HOST_PAT_FULL,
428 VMX_VMCS64_HOST_PAT_HIGH,
429 VMX_VMCS64_HOST_EFER_FULL,
430 VMX_VMCS64_HOST_EFER_HIGH,
431 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
432 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
433 VMX_VMCS64_HOST_PKRS_FULL,
434 VMX_VMCS64_HOST_PKRS_HIGH,
435
436 /* 32-bit control fields. */
437 VMX_VMCS32_CTRL_PIN_EXEC,
438 VMX_VMCS32_CTRL_PROC_EXEC,
439 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
440 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
441 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
442 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
443 VMX_VMCS32_CTRL_EXIT,
444 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
445 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
446 VMX_VMCS32_CTRL_ENTRY,
447 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
448 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
449 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
450 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
451 VMX_VMCS32_CTRL_TPR_THRESHOLD,
452 VMX_VMCS32_CTRL_PROC_EXEC2,
453 VMX_VMCS32_CTRL_PLE_GAP,
454 VMX_VMCS32_CTRL_PLE_WINDOW,
455
456 /* 32-bit read-only data fields. */
457 VMX_VMCS32_RO_VM_INSTR_ERROR,
458 VMX_VMCS32_RO_EXIT_REASON,
459 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
460 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
461 VMX_VMCS32_RO_IDT_VECTORING_INFO,
462 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
463 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
464 VMX_VMCS32_RO_EXIT_INSTR_INFO,
465
466 /* 32-bit guest-state fields. */
467 VMX_VMCS32_GUEST_ES_LIMIT,
468 VMX_VMCS32_GUEST_CS_LIMIT,
469 VMX_VMCS32_GUEST_SS_LIMIT,
470 VMX_VMCS32_GUEST_DS_LIMIT,
471 VMX_VMCS32_GUEST_FS_LIMIT,
472 VMX_VMCS32_GUEST_GS_LIMIT,
473 VMX_VMCS32_GUEST_LDTR_LIMIT,
474 VMX_VMCS32_GUEST_TR_LIMIT,
475 VMX_VMCS32_GUEST_GDTR_LIMIT,
476 VMX_VMCS32_GUEST_IDTR_LIMIT,
477 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
478 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
479 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
480 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
481 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
482 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
483 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
484 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
485 VMX_VMCS32_GUEST_INT_STATE,
486 VMX_VMCS32_GUEST_ACTIVITY_STATE,
487 VMX_VMCS32_GUEST_SMBASE,
488 VMX_VMCS32_GUEST_SYSENTER_CS,
489 VMX_VMCS32_PREEMPT_TIMER_VALUE,
490
491 /* 32-bit host-state fields. */
492 VMX_VMCS32_HOST_SYSENTER_CS,
493
494 /* Natural-width control fields. */
495 VMX_VMCS_CTRL_CR0_MASK,
496 VMX_VMCS_CTRL_CR4_MASK,
497 VMX_VMCS_CTRL_CR0_READ_SHADOW,
498 VMX_VMCS_CTRL_CR4_READ_SHADOW,
499 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
500 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
501 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
502 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
503
504 /* Natural-width read-only data fields. */
505 VMX_VMCS_RO_EXIT_QUALIFICATION,
506 VMX_VMCS_RO_IO_RCX,
507 VMX_VMCS_RO_IO_RSI,
508 VMX_VMCS_RO_IO_RDI,
509 VMX_VMCS_RO_IO_RIP,
510 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
511
512 /* Natural-width guest-state fields */
513 VMX_VMCS_GUEST_CR0,
514 VMX_VMCS_GUEST_CR3,
515 VMX_VMCS_GUEST_CR4,
516 VMX_VMCS_GUEST_ES_BASE,
517 VMX_VMCS_GUEST_CS_BASE,
518 VMX_VMCS_GUEST_SS_BASE,
519 VMX_VMCS_GUEST_DS_BASE,
520 VMX_VMCS_GUEST_FS_BASE,
521 VMX_VMCS_GUEST_GS_BASE,
522 VMX_VMCS_GUEST_LDTR_BASE,
523 VMX_VMCS_GUEST_TR_BASE,
524 VMX_VMCS_GUEST_GDTR_BASE,
525 VMX_VMCS_GUEST_IDTR_BASE,
526 VMX_VMCS_GUEST_DR7,
527 VMX_VMCS_GUEST_RSP,
528 VMX_VMCS_GUEST_RIP,
529 VMX_VMCS_GUEST_RFLAGS,
530 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
531 VMX_VMCS_GUEST_SYSENTER_ESP,
532 VMX_VMCS_GUEST_SYSENTER_EIP,
533 VMX_VMCS_GUEST_S_CET,
534 VMX_VMCS_GUEST_SSP,
535 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
536
537 /* Natural-width host-state fields */
538 VMX_VMCS_HOST_CR0,
539 VMX_VMCS_HOST_CR3,
540 VMX_VMCS_HOST_CR4,
541 VMX_VMCS_HOST_FS_BASE,
542 VMX_VMCS_HOST_GS_BASE,
543 VMX_VMCS_HOST_TR_BASE,
544 VMX_VMCS_HOST_GDTR_BASE,
545 VMX_VMCS_HOST_IDTR_BASE,
546 VMX_VMCS_HOST_SYSENTER_ESP,
547 VMX_VMCS_HOST_SYSENTER_EIP,
548 VMX_VMCS_HOST_RSP,
549 VMX_VMCS_HOST_RIP,
550 VMX_VMCS_HOST_S_CET,
551 VMX_VMCS_HOST_SSP,
552 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
553};
554#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
555
556#ifdef VBOX_STRICT
557static const uint32_t g_aVmcsSegBase[] =
558{
559 VMX_VMCS_GUEST_ES_BASE,
560 VMX_VMCS_GUEST_CS_BASE,
561 VMX_VMCS_GUEST_SS_BASE,
562 VMX_VMCS_GUEST_DS_BASE,
563 VMX_VMCS_GUEST_FS_BASE,
564 VMX_VMCS_GUEST_GS_BASE
565};
566static const uint32_t g_aVmcsSegSel[] =
567{
568 VMX_VMCS16_GUEST_ES_SEL,
569 VMX_VMCS16_GUEST_CS_SEL,
570 VMX_VMCS16_GUEST_SS_SEL,
571 VMX_VMCS16_GUEST_DS_SEL,
572 VMX_VMCS16_GUEST_FS_SEL,
573 VMX_VMCS16_GUEST_GS_SEL
574};
575static const uint32_t g_aVmcsSegLimit[] =
576{
577 VMX_VMCS32_GUEST_ES_LIMIT,
578 VMX_VMCS32_GUEST_CS_LIMIT,
579 VMX_VMCS32_GUEST_SS_LIMIT,
580 VMX_VMCS32_GUEST_DS_LIMIT,
581 VMX_VMCS32_GUEST_FS_LIMIT,
582 VMX_VMCS32_GUEST_GS_LIMIT
583};
584static const uint32_t g_aVmcsSegAttr[] =
585{
586 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
587 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
588 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
589 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
590 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
591 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS
592};
593AssertCompile(RT_ELEMENTS(g_aVmcsSegSel) == X86_SREG_COUNT);
594AssertCompile(RT_ELEMENTS(g_aVmcsSegLimit) == X86_SREG_COUNT);
595AssertCompile(RT_ELEMENTS(g_aVmcsSegBase) == X86_SREG_COUNT);
596AssertCompile(RT_ELEMENTS(g_aVmcsSegAttr) == X86_SREG_COUNT);
597#endif /* VBOX_STRICT */
598
599#ifdef HMVMX_USE_FUNCTION_TABLE
600/**
601 * VMX_EXIT dispatch table.
602 */
603static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
604{
605 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
606 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
607 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
608 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
609 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
610 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
611 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
612 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
613 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
614 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
615 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
616 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
617 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
618 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
619 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
620 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
621 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
622 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
623 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
624#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
625 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
626 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
627 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
628 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
629 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
630 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
631 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
632 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
633 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
634#else
635 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
636 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
637 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
638 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
639 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
640 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
641 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
642 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
643 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
644#endif
645 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
646 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
647 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
648 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
649 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
650 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
651 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
652 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
653 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
654 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
655 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
656 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
657 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
658 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
659 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
660 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
661 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
662 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
663 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
664 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
665 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
666 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
667#if defined(VBOX_WITH_NESTED_HWVIRT_VMX) && defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT)
668 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
669#else
670 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
671#endif
672 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
673 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
674#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
675 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
676#else
677 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
678#endif
679 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
680 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
681 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
682 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
683 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
684 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
685 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
686 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
687 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
688 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
689 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
690 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
691 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
692 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
693 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
694 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
695};
696#endif /* HMVMX_USE_FUNCTION_TABLE */
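
/*
 * Illustrative dispatch (editorial sketch, not part of the original file): with
 * HMVMX_USE_FUNCTION_TABLE defined, a VM-exit is dispatched by indexing this
 * table with the exit reason, roughly along the lines of:
 *
 *     if (pVmxTransient->uExitReason <= VMX_EXIT_MAX)
 *         rcStrict = g_aVMExitHandlers[pVmxTransient->uExitReason].pfn(pVCpu, pVmxTransient);
 *
 * The exact guard and the name of the status variable are assumptions; the real
 * dispatch lives in the exit-handling code further down in this file.
 */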
697
698#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
699static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
700{
701 /* 0 */ "(Not Used)",
702 /* 1 */ "VMCALL executed in VMX root operation.",
703 /* 2 */ "VMCLEAR with invalid physical address.",
704 /* 3 */ "VMCLEAR with VMXON pointer.",
705 /* 4 */ "VMLAUNCH with non-clear VMCS.",
706 /* 5 */ "VMRESUME with non-launched VMCS.",
707 /* 6 */ "VMRESUME after VMXOFF.",
708 /* 7 */ "VM-entry with invalid control fields.",
709 /* 8 */ "VM-entry with invalid host state fields.",
710 /* 9 */ "VMPTRLD with invalid physical address.",
711 /* 10 */ "VMPTRLD with VMXON pointer.",
712 /* 11 */ "VMPTRLD with incorrect revision identifier.",
713 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
714 /* 13 */ "VMWRITE to read-only VMCS component.",
715 /* 14 */ "(Not Used)",
716 /* 15 */ "VMXON executed in VMX root operation.",
717 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
718 /* 17 */ "VM-entry with non-launched executing VMCS.",
719 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
720 /* 19 */ "VMCALL with non-clear VMCS.",
721 /* 20 */ "VMCALL with invalid VM-exit control fields.",
722 /* 21 */ "(Not Used)",
723 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
724 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
725 /* 24 */ "VMCALL with invalid SMM-monitor features.",
726 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
727 /* 26 */ "VM-entry with events blocked by MOV SS.",
728 /* 27 */ "(Not Used)",
729 /* 28 */ "Invalid operand to INVEPT/INVVPID."
730};
731#endif /* VBOX_STRICT && LOG_ENABLED */
732
733
734/**
735 * Gets the CR0 guest/host mask.
736 *
737 * These bits typically do not change through the lifetime of a VM. Any bit set in
738 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
739 * by the guest.
740 *
741 * @returns The CR0 guest/host mask.
742 * @param pVCpu The cross context virtual CPU structure.
743 */
744static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
745{
746 /*
747 * Modifications by the guest to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW)
748 * and to CR0 bits that we require for shadow paging (PG) must cause VM-exits.
749 *
750 * Furthermore, modifications to any bits that are reserved/unspecified currently
751 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
752 * when future CPUs specify and use currently reserved/unspecified bits.
753 */
754 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
755 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
756 * and @bugref{6944}. */
757 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
758 return ( X86_CR0_PE
759 | X86_CR0_NE
760 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
761 | X86_CR0_PG
762 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
763}
764
765
766/**
767 * Gets the CR4 guest/host mask.
768 *
769 * These bits typically do not change through the lifetime of a VM. Any bit set in
770 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
771 * by the guest.
772 *
773 * @returns The CR4 guest/host mask.
774 * @param pVCpu The cross context virtual CPU structure.
775 */
776static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
777{
778 /*
779 * We construct a mask of all CR4 bits that the guest can modify without causing
780 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
781 * a VM-exit when the guest attempts to modify them when executing using
782 * hardware-assisted VMX.
783 *
784 * When a feature is not exposed to the guest (and may be present on the host),
785 * we want to intercept guest modifications to the bit so we can emulate proper
786 * behavior (e.g., #GP).
787 *
788 * Furthermore, only modifications to those bits that don't require immediate
789 * emulation are allowed. For example, PCIDE is excluded because the behavior
790 * depends on CR3 which might not always be the guest value while executing
791 * using hardware-assisted VMX.
792 */
793 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
794 bool const fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
795 bool const fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
796 bool const fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
797
798 /*
799 * Paranoia.
800 * Ensure features exposed to the guest are present on the host.
801 */
802 Assert(!fFsGsBase || pVM->cpum.ro.HostFeatures.fFsGsBase);
803 Assert(!fXSaveRstor || pVM->cpum.ro.HostFeatures.fXSaveRstor);
804 Assert(!fFxSaveRstor || pVM->cpum.ro.HostFeatures.fFxSaveRstor);
805
806 uint64_t const fGstMask = ( X86_CR4_PVI
807 | X86_CR4_TSD
808 | X86_CR4_DE
809 | X86_CR4_MCE
810 | X86_CR4_PCE
811 | X86_CR4_OSXMMEEXCPT
812 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
813 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
814 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0));
815 return ~fGstMask;
816}
817
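/*
 * Usage note (editorial, not part of the original file): the masks returned by
 * vmxHCGetFixedCr0Mask/vmxHCGetFixedCr4Mask are the values intended for the
 * CR0/CR4 guest/host mask VMCS fields (VMX_VMCS_CTRL_CR0_MASK and
 * VMX_VMCS_CTRL_CR4_MASK listed in g_aVmcsFields above). Committing them would
 * look roughly like:
 *
 *     uint64_t const fCr4Mask = vmxHCGetFixedCr4Mask(pVCpu);
 *     int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, fCr4Mask);
 *     AssertRC(rc);
 *
 * The write shown is an assumption about how the export code consumes these masks.
 */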
818
819/**
820 * Adds one or more exceptions to the exception bitmap and commits it to the current
821 * VMCS.
822 *
823 * @param pVCpu The cross context virtual CPU structure.
824 * @param pVmxTransient The VMX-transient structure.
825 * @param uXcptMask The exception(s) to add.
826 */
827static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
828{
829 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
830 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
831 if ((uXcptBitmap & uXcptMask) != uXcptMask)
832 {
833 uXcptBitmap |= uXcptMask;
834 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
835 AssertRC(rc);
836 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
837 }
838}
839
840
841/**
842 * Adds an exception to the exception bitmap and commits it to the current VMCS.
843 *
844 * @param pVCpu The cross context virtual CPU structure.
845 * @param pVmxTransient The VMX-transient structure.
846 * @param uXcpt The exception to add.
847 */
848static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
849{
850 Assert(uXcpt <= X86_XCPT_LAST);
851 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
852}
853
854
855/**
856 * Remove one or more exceptions from the exception bitmap and commits it to the
857 * current VMCS.
858 *
859 * This takes care of not removing the exception intercept if a nested-guest
860 * requires the exception to be intercepted.
861 *
862 * @returns VBox status code.
863 * @param pVCpu The cross context virtual CPU structure.
864 * @param pVmxTransient The VMX-transient structure.
865 * @param uXcptMask The exception(s) to remove.
866 */
867static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
868{
869 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
870 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
871 if (u32XcptBitmap & uXcptMask)
872 {
873#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
874 if (!pVmxTransient->fIsNestedGuest)
875 { /* likely */ }
876 else
877 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
878#endif
879#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
880 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
881 | RT_BIT(X86_XCPT_DE)
882 | RT_BIT(X86_XCPT_NM)
883 | RT_BIT(X86_XCPT_TS)
884 | RT_BIT(X86_XCPT_UD)
885 | RT_BIT(X86_XCPT_NP)
886 | RT_BIT(X86_XCPT_SS)
887 | RT_BIT(X86_XCPT_GP)
888 | RT_BIT(X86_XCPT_PF)
889 | RT_BIT(X86_XCPT_MF));
890#elif defined(HMVMX_ALWAYS_TRAP_PF)
891 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
892#endif
893 if (uXcptMask)
894 {
895 /* Validate we are not removing any essential exception intercepts. */
896#ifndef IN_NEM_DARWIN
897 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
898#else
899 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
900#endif
901 NOREF(pVCpu);
902 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
903 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
904
905 /* Remove it from the exception bitmap. */
906 u32XcptBitmap &= ~uXcptMask;
907
908 /* Commit and update the cache if necessary. */
909 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
910 {
911 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
912 AssertRC(rc);
913 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
914 }
915 }
916 }
917 return VINF_SUCCESS;
918}
919
920
921/**
922 * Removes an exception from the exception bitmap and commits it to the current
923 * VMCS.
924 *
925 * @returns VBox status code.
926 * @param pVCpu The cross context virtual CPU structure.
927 * @param pVmxTransient The VMX-transient structure.
928 * @param uXcpt The exception to remove.
929 */
930static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
931{
932 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
933}
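
/*
 * Illustrative usage (editorial sketch, not part of the original file): code that
 * temporarily traps an exception can pair the add/remove helpers, e.g. for #GP:
 *
 *     vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 *     ...
 *     int rc = vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 *     AssertRC(rc);
 *
 * vmxHCRemoveXcptInterceptMask() above ensures that intercepts still required by
 * a nested-guest, or the essential #DB/#AC (and #PF without nested paging)
 * intercepts, are never dropped.
 */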
934
935
936#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
937/**
938 * Loads the shadow VMCS specified by the VMCS info. object.
939 *
940 * @returns VBox status code.
941 * @param pVmcsInfo The VMCS info. object.
942 *
943 * @remarks Can be called with interrupts disabled.
944 */
945static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
946{
947 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
948 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
949
950 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
951 if (RT_SUCCESS(rc))
952 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
953 return rc;
954}
955
956
957/**
958 * Clears the shadow VMCS specified by the VMCS info. object.
959 *
960 * @returns VBox status code.
961 * @param pVmcsInfo The VMCS info. object.
962 *
963 * @remarks Can be called with interrupts disabled.
964 */
965static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
966{
967 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
968 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
969
970 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
971 if (RT_SUCCESS(rc))
972 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
973 return rc;
974}
975
976
977/**
978 * Switches from and to the specified VMCSes.
979 *
980 * @returns VBox status code.
981 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
982 * @param pVmcsInfoTo The VMCS info. object we are switching to.
983 *
984 * @remarks Called with interrupts disabled.
985 */
986static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
987{
988 /*
989 * Clear the VMCS we are switching out if it has not already been cleared.
990 * This will sync any CPU internal data back to the VMCS.
991 */
992 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
993 {
994 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
995 if (RT_SUCCESS(rc))
996 {
997 /*
998 * The shadow VMCS, if any, would not be active at this point since we
999 * would have cleared it while importing the virtual hardware-virtualization
1000 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
1001 * clear the shadow VMCS here, just assert for safety.
1002 */
1003 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
1004 }
1005 else
1006 return rc;
1007 }
1008
1009 /*
1010 * Clear the VMCS we are switching to if it has not already been cleared.
1011 * This will initialize the VMCS launch state to "clear" required for loading it.
1012 *
1013 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1014 */
1015 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1016 {
1017 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1018 if (RT_SUCCESS(rc))
1019 { /* likely */ }
1020 else
1021 return rc;
1022 }
1023
1024 /*
1025 * Finally, load the VMCS we are switching to.
1026 */
1027 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1028}
1029
1030
1031/**
1032 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1033 * caller.
1034 *
1035 * @returns VBox status code.
1036 * @param pVCpu The cross context virtual CPU structure.
1037 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1038 * true) or guest VMCS (pass false).
1039 */
1040static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1041{
1042 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1043 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1044
1045 PVMXVMCSINFO pVmcsInfoFrom;
1046 PVMXVMCSINFO pVmcsInfoTo;
1047 if (fSwitchToNstGstVmcs)
1048 {
1049 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1050 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1051 }
1052 else
1053 {
1054 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1055 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1056 }
1057
1058 /*
1059 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1060 * preemption hook code path acquires the current VMCS.
1061 */
1062 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1063
1064 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1065 if (RT_SUCCESS(rc))
1066 {
1067 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1068 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1069
1070 /*
1071 * If we are switching to a VMCS that was executed on a different host CPU or was
1072 * never executed before, flag that we need to export the host state before executing
1073 * guest/nested-guest code using hardware-assisted VMX.
1074 *
1075 * This could probably be done in a preemptible context since the preemption hook
1076 * will flag the necessary change in host context. However, since preemption is
1077 * already disabled and to avoid making assumptions about host specific code in
1078 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1079 * disabled.
1080 */
1081 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1082 { /* likely */ }
1083 else
1084 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1085
1086 ASMSetFlags(fEFlags);
1087
1088 /*
1089 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1090 * flag that we need to update the host MSR values there. Even if we decide in the
1091 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1092 * if its content differs, we would have to update the host MSRs anyway.
1093 */
1094 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1095 }
1096 else
1097 ASMSetFlags(fEFlags);
1098 return rc;
1099}
1100#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1101
1102
1103#ifdef VBOX_STRICT
1104/**
1105 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1106 * transient structure.
1107 *
1108 * @param pVCpu The cross context virtual CPU structure.
1109 * @param pVmxTransient The VMX-transient structure.
1110 */
1111DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1112{
1113 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1114 AssertRC(rc);
1115}
1116
1117
1118/**
1119 * Reads the VM-entry exception error code field from the VMCS into
1120 * the VMX transient structure.
1121 *
1122 * @param pVCpu The cross context virtual CPU structure.
1123 * @param pVmxTransient The VMX-transient structure.
1124 */
1125DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1126{
1127 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1128 AssertRC(rc);
1129}
1130
1131
1132/**
1133 * Reads the VM-entry instruction length field from the VMCS into
1134 * the VMX transient structure.
1135 *
1136 * @param pVCpu The cross context virtual CPU structure.
1137 * @param pVmxTransient The VMX-transient structure.
1138 */
1139DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1140{
1141 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1142 AssertRC(rc);
1143}
1144#endif /* VBOX_STRICT */
1145
1146
1147/**
1148 * Reads the VM-exit interruption-information field from the VMCS into the VMX
1149 * transient structure.
1150 *
1151 * @param pVCpu The cross context virtual CPU structure.
1152 * @param pVmxTransient The VMX-transient structure.
1153 */
1154DECLINLINE(void) vmxHCReadExitIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1155{
1156 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1157 {
1158 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1159 AssertRC(rc);
1160 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO;
1161 }
1162}
1163
1164
1165/**
1166 * Reads the VM-exit interruption error code from the VMCS into the VMX
1167 * transient structure.
1168 *
1169 * @param pVCpu The cross context virtual CPU structure.
1170 * @param pVmxTransient The VMX-transient structure.
1171 */
1172DECLINLINE(void) vmxHCReadExitIntErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1173{
1174 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1175 {
1176 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1177 AssertRC(rc);
1178 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE;
1179 }
1180}
1181
1182
1183/**
1184 * Reads the VM-exit instruction length field from the VMCS into the VMX
1185 * transient structure.
1186 *
1187 * @param pVCpu The cross context virtual CPU structure.
1188 * @param pVmxTransient The VMX-transient structure.
1189 */
1190DECLINLINE(void) vmxHCReadExitInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1191{
1192 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1193 {
1194 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1195 AssertRC(rc);
1196 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
1197 }
1198}
1199
1200
1201/**
1202 * Reads the VM-exit instruction-information field from the VMCS into
1203 * the VMX transient structure.
1204 *
1205 * @param pVCpu The cross context virtual CPU structure.
1206 * @param pVmxTransient The VMX-transient structure.
1207 */
1208DECLINLINE(void) vmxHCReadExitInstrInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1209{
1210 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1211 {
1212 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1213 AssertRC(rc);
1214 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO;
1215 }
1216}
1217
1218
1219/**
1220 * Reads the Exit Qualification from the VMCS into the VMX transient structure.
1221 *
1222 * @param pVCpu The cross context virtual CPU structure.
1223 * @param pVmxTransient The VMX-transient structure.
1224 */
1225DECLINLINE(void) vmxHCReadExitQualVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1226{
1227 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1228 {
1229 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1230 AssertRC(rc);
1231 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
1232 }
1233}
1234
1235
1236/**
1237 * Reads the Guest-linear address from the VMCS into the VMX transient structure.
1238 *
1239 * @param pVCpu The cross context virtual CPU structure.
1240 * @param pVmxTransient The VMX-transient structure.
1241 */
1242DECLINLINE(void) vmxHCReadGuestLinearAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1243{
1244 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1245 {
1246 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1247 AssertRC(rc);
1248 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_LINEAR_ADDR;
1249 }
1250}
1251
1252
1253/**
1254 * Reads the Guest-physical address from the VMCS into the VMX transient structure.
1255 *
1256 * @param pVCpu The cross context virtual CPU structure.
1257 * @param pVmxTransient The VMX-transient structure.
1258 */
1259DECLINLINE(void) vmxHCReadGuestPhysicalAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1260{
1261 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1262 {
1263 int rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1264 AssertRC(rc);
1265 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PHYSICAL_ADDR;
1266 }
1267}
1268
1269#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1270/**
1271 * Reads the Guest pending-debug exceptions from the VMCS into the VMX transient
1272 * structure.
1273 *
1274 * @param pVCpu The cross context virtual CPU structure.
1275 * @param pVmxTransient The VMX-transient structure.
1276 */
1277DECLINLINE(void) vmxHCReadGuestPendingDbgXctps(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1278{
1279 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1280 {
1281 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1282 AssertRC(rc);
1283 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PENDING_DBG_XCPTS;
1284 }
1285}
1286#endif
1287
1288/**
1289 * Reads the IDT-vectoring information field from the VMCS into the VMX
1290 * transient structure.
1291 *
1292 * @param pVCpu The cross context virtual CPU structure.
1293 * @param pVmxTransient The VMX-transient structure.
1294 *
1295 * @remarks No-long-jump zone!!!
1296 */
1297DECLINLINE(void) vmxHCReadIdtVectoringInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1298{
1299 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1300 {
1301 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1302 AssertRC(rc);
1303 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO;
1304 }
1305}
1306
1307
1308/**
1309 * Reads the IDT-vectoring error code from the VMCS into the VMX
1310 * transient structure.
1311 *
1312 * @param pVCpu The cross context virtual CPU structure.
1313 * @param pVmxTransient The VMX-transient structure.
1314 */
1315DECLINLINE(void) vmxHCReadIdtVectoringErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1316{
1317 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1318 {
1319 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1320 AssertRC(rc);
1321 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE;
1322 }
1323}
1324
1325#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1326/**
1327 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1328 *
1329 * @param pVCpu The cross context virtual CPU structure.
1330 * @param pVmxTransient The VMX-transient structure.
1331 */
1332static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1333{
1334 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1335 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1336 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1337 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1338 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1339 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1340 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1341 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1342 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1343 AssertRC(rc);
1344 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1345 | HMVMX_READ_EXIT_INSTR_LEN
1346 | HMVMX_READ_EXIT_INSTR_INFO
1347 | HMVMX_READ_IDT_VECTORING_INFO
1348 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1349 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1350 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1351 | HMVMX_READ_GUEST_LINEAR_ADDR
1352 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1353}
1354#endif
1355
1356/**
1357 * Verifies that our cached values of the VMCS fields are all consistent with
1358 * what's actually present in the VMCS.
1359 *
1360 * @returns VBox status code.
1361 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1362 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1363 * VMCS content. HMCPU error-field is
1364 * updated, see VMX_VCI_XXX.
1365 * @param pVCpu The cross context virtual CPU structure.
1366 * @param pVmcsInfo The VMCS info. object.
1367 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1368 */
1369static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1370{
1371 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1372
1373 uint32_t u32Val;
1374 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1375 AssertRC(rc);
1376 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1377 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1378 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1379 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1380
1381 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1382 AssertRC(rc);
1383 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1384 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1385 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1386 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1387
1388 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1389 AssertRC(rc);
1390 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1391 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1392 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1393 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1394
1395 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1396 AssertRC(rc);
1397 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1398 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1399 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1400 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1401
1402 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1403 {
1404 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1405 AssertRC(rc);
1406 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1407 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1408 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1409 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1410 }
1411
1412 uint64_t u64Val;
1413 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1414 {
1415 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1416 AssertRC(rc);
1417 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1418 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1419 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1420 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1421 }
1422
1423 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1424 AssertRC(rc);
1425 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1426 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1427 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1428 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1429
1430 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1431 AssertRC(rc);
1432 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1433 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1434 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1435 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1436
1437 NOREF(pcszVmcs);
1438 return VINF_SUCCESS;
1439}
1440
1441
1442/**
1443 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1444 * VMCS.
1445 *
1446 * This is typically required when the guest changes paging mode.
1447 *
1448 * @returns VBox status code.
1449 * @param pVCpu The cross context virtual CPU structure.
1450 * @param pVmxTransient The VMX-transient structure.
1451 *
1452 * @remarks Requires EFER.
1453 * @remarks No-long-jump zone!!!
1454 */
1455static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1456{
1457 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1458 {
1459 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1460 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1461
1462 /*
1463 * VM-entry controls.
1464 */
1465 {
1466 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1467 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1468
1469 /*
1470 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1471 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1472 *
1473 * For nested-guests, this is a mandatory VM-entry control. It's also
1474 * required because we do not want to leak host bits to the nested-guest.
1475 */
1476 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1477
1478 /*
1479 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1480 *
1481 * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1482 * required to get the nested-guest working with hardware-assisted VMX execution.
1483 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1484 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1485 * here rather than while merging the guest VMCS controls.
1486 */
1487 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1488 {
1489 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1490 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1491 }
1492 else
1493 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1494
1495 /*
1496 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1497 *
1498 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1499 * regardless of whether the nested-guest VMCS specifies it because we are free to
1500 * load whatever MSRs we require and we do not need to modify the guest visible copy
1501 * of the VM-entry MSR load area.
1502 */
1503 if ( g_fHmVmxSupportsVmcsEfer
1504#ifndef IN_NEM_DARWIN
1505 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1506#endif
1507 )
1508 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1509 else
1510 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1511
1512 /*
1513 * The following should -not- be set (since we're not in SMM mode):
1514 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1515 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1516 */
1517
1518 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1519 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1520
1521 if ((fVal & fZap) == fVal)
1522 { /* likely */ }
1523 else
1524 {
1525 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1526 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1527 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1528 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1529 }
1530
1531 /* Commit it to the VMCS. */
1532 if (pVmcsInfo->u32EntryCtls != fVal)
1533 {
1534 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1535 AssertRC(rc);
1536 pVmcsInfo->u32EntryCtls = fVal;
1537 }
1538 }
1539
1540 /*
1541 * VM-exit controls.
1542 */
1543 {
1544 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1545 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1546
1547 /*
1548 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1549 * supported the 1-setting of this bit.
1550 *
1551 * For nested-guests, we set the "save debug controls" control since the corresponding
1552 * "load debug controls" control is mandatory for nested-guests anyway.
1553 */
1554 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1555
1556 /*
1557 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1558 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1559 * host EFER.LMA and EFER.LME bits to this value. See the assertion in
1560 * vmxHCExportHostMsrs().
1561 *
1562 * For nested-guests, we always set this bit as we do not support 32-bit
1563 * hosts.
1564 */
1565 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1566
1567#ifndef IN_NEM_DARWIN
1568 /*
1569 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1570 *
1571 * For nested-guests, we should use the "save IA32_EFER" control if we also
1572 * used the "load IA32_EFER" control while exporting VM-entry controls.
1573 */
1574 if ( g_fHmVmxSupportsVmcsEfer
1575 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1576 {
1577 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1578 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1579 }
1580#endif
1581
1582 /*
1583 * Enable saving of the VMX-preemption timer value on VM-exit.
1584 * For nested-guests, currently not exposed/used.
1585 */
1586 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1587 * the timer value. */
1588 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1589 {
1590 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1591 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1592 }
1593
1594 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1595 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1596
1597 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1598 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1599 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1600
1601 if ((fVal & fZap) == fVal)
1602 { /* likely */ }
1603 else
1604 {
1605 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1606 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1607 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1608 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1609 }
1610
1611 /* Commit it to the VMCS. */
1612 if (pVmcsInfo->u32ExitCtls != fVal)
1613 {
1614 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1615 AssertRC(rc);
1616 pVmcsInfo->u32ExitCtls = fVal;
1617 }
1618 }
1619
1620 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1621 }
1622 return VINF_SUCCESS;
1623}
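
/*
 * A minimal sketch of the allowed-0/allowed-1 convention used by the fVal/fZap
 * handling above: bits set in the capability MSR's allowed-0 half must be 1 in
 * the control, bits clear in the allowed-1 half must be 0. The helper name and
 * shape below are hypothetical, purely to make the (fVal & fZap) == fVal check
 * explicit.
 *
 *     static bool vmxExampleIsCtlValueValid(uint32_t fVal, uint32_t fAllowed0, uint32_t fAllowed1)
 *     {
 *         return (fVal & fAllowed0) == fAllowed0   // every bit forced to 1 by allowed-0 is set
 *             && (fVal & fAllowed1) == fVal;       // no bit outside allowed-1 is set, i.e. (fVal & fZap) == fVal
 *     }
 */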
1624
1625
1626/**
1627 * Sets the TPR threshold in the VMCS.
1628 *
1629 * @param pVCpu The cross context virtual CPU structure.
1630 * @param pVmcsInfo The VMCS info. object.
1631 * @param u32TprThreshold The TPR threshold (task-priority class only).
1632 */
1633DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1634{
1635 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1636 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1637 RT_NOREF(pVmcsInfo);
1638 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1639 AssertRC(rc);
1640}
1641
1642
1643/**
1644 * Exports the guest APIC TPR state into the VMCS.
1645 *
1646 * @param pVCpu The cross context virtual CPU structure.
1647 * @param pVmxTransient The VMX-transient structure.
1648 *
1649 * @remarks No-long-jump zone!!!
1650 */
1651static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1652{
1653 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1654 {
1655 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1656
1657 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1658 if (!pVmxTransient->fIsNestedGuest)
1659 {
1660 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1661 && APICIsEnabled(pVCpu))
1662 {
1663 /*
1664 * Setup TPR shadowing.
1665 */
1666 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1667 {
1668 bool fPendingIntr = false;
1669 uint8_t u8Tpr = 0;
1670 uint8_t u8PendingIntr = 0;
1671 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1672 AssertRC(rc);
1673
1674 /*
1675 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1676 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1677 * priority of the pending interrupt so we can deliver the interrupt. If there
1678 * are no interrupts pending, set threshold to 0 to not cause any
1679 * TPR-below-threshold VM-exits.
1680 */
1681 uint32_t u32TprThreshold = 0;
1682 if (fPendingIntr)
1683 {
1684 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1685 (which is the Task-Priority Class). */
1686 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1687 const uint8_t u8TprPriority = u8Tpr >> 4;
1688 if (u8PendingPriority <= u8TprPriority)
1689 u32TprThreshold = u8PendingPriority;
1690 }
1691
1692 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1693 }
1694 }
1695 }
1696 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1697 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1698 }
1699}
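
/*
 * Worked example of the TPR-threshold computation above (the values are
 * hypothetical): with u8Tpr = 0x50 the guest's task-priority class is 5, and a
 * pending interrupt vector of 0x45 belongs to class 4. Since 4 <= 5 the
 * interrupt is currently masked, so the threshold is set to its class:
 *
 *     uint8_t const  u8PendingPriority = 0x45 >> 4;                      // 4
 *     uint8_t const  u8TprPriority     = 0x50 >> 4;                      // 5
 *     uint32_t const u32TprThreshold   = u8PendingPriority <= u8TprPriority
 *                                      ? u8PendingPriority : 0;          // 4
 *
 * Once the guest lowers its TPR class below 4, VT-x raises a TPR-below-threshold
 * VM-exit and the pending interrupt can be delivered.
 */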
1700
1701
1702/**
1703 * Gets the guest interruptibility-state and updates related force-flags.
1704 *
1705 * @returns Guest's interruptibility-state.
1706 * @param pVCpu The cross context virtual CPU structure.
1707 *
1708 * @remarks No-long-jump zone!!!
1709 */
1710static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1711{
1712 /*
1713 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1714 */
1715 uint32_t fIntrState = 0;
1716 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1717 {
1718 /* If inhibition is active, RIP and RFLAGS should've been imported from the VMCS already. */
1719 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
1720
1721 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1722 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
1723 {
1724 if (pCtx->eflags.Bits.u1IF)
1725 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1726 else
1727 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1728 }
1729 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1730 {
1731 /*
1732 * We can clear the inhibit force flag as even if we go back to the recompiler
1733 * without executing guest code in VT-x, the flag's condition to be cleared is
1734 * met and thus the cleared state is correct.
1735 */
1736 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1737 }
1738 }
1739
1740 /*
1741 * Check if we should inhibit NMI delivery.
1742 */
1743 if (CPUMIsGuestNmiBlocking(pVCpu))
1744 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1745
1746 /*
1747 * Validate.
1748 */
1749#ifdef VBOX_STRICT
1750 /* We don't support block-by-SMI yet.*/
1751 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1752
1753 /* Block-by-STI must not be set when interrupts are disabled. */
1754 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
1755 {
1756 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1757 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
1758 }
1759#endif
1760
1761 return fIntrState;
1762}
1763
1764
1765/**
1766 * Exports the exception intercepts required for guest execution in the VMCS.
1767 *
1768 * @param pVCpu The cross context virtual CPU structure.
1769 * @param pVmxTransient The VMX-transient structure.
1770 *
1771 * @remarks No-long-jump zone!!!
1772 */
1773static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1774{
1775 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1776 {
1777 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1778 if ( !pVmxTransient->fIsNestedGuest
1779 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1780 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1781 else
1782 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1783
1784 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1785 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1786 }
1787}
1788
1789
1790/**
1791 * Exports the guest's RIP into the guest-state area in the VMCS.
1792 *
1793 * @param pVCpu The cross context virtual CPU structure.
1794 *
1795 * @remarks No-long-jump zone!!!
1796 */
1797static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1798{
1799 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1800 {
1801 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1802
1803 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1804 AssertRC(rc);
1805
1806 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1807 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1808 }
1809}
1810
1811
1812/**
1813 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1814 *
1815 * @param pVCpu The cross context virtual CPU structure.
1816 * @param pVmxTransient The VMX-transient structure.
1817 *
1818 * @remarks No-long-jump zone!!!
1819 */
1820static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1821{
1822 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1823 {
1824 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1825
1826 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
1827 Let us assert it as such and use 32-bit VMWRITE. */
1828 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
1829 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
1830 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
1831 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
1832
1833#ifndef IN_NEM_DARWIN
1834 /*
1835 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1836 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1837 * can run the real-mode guest code under Virtual 8086 mode.
1838 */
1839 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1840 if (pVmcsInfo->RealMode.fRealOnV86Active)
1841 {
1842 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1843 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1844 Assert(!pVmxTransient->fIsNestedGuest);
1845 pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
1846 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
1847 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1848 }
1849#else
1850 RT_NOREF(pVmxTransient);
1851#endif
1852
1853 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
1854 AssertRC(rc);
1855
1856 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1857 Log4Func(("eflags=%#RX32\n", fEFlags.u32));
1858 }
1859}
1860
1861
1862#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1863/**
1864 * Copies the nested-guest VMCS to the shadow VMCS.
1865 *
1866 * @returns VBox status code.
1867 * @param pVCpu The cross context virtual CPU structure.
1868 * @param pVmcsInfo The VMCS info. object.
1869 *
1870 * @remarks No-long-jump zone!!!
1871 */
1872static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1873{
1874 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1875 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1876
1877 /*
1878 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1879 * current VMCS, as we may try saving guest lazy MSRs.
1880 *
1881 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1882 * calling the VMCS import code, which currently performs the guest MSR reads
1883 * (on 64-bit hosts), accesses the auto-load/store MSR area (on 32-bit hosts),
1884 * and runs the rest of the VMX leave-session machinery.
1885 */
1886 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1887
1888 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1889 if (RT_SUCCESS(rc))
1890 {
1891 /*
1892 * Copy all guest read/write VMCS fields.
1893 *
1894 * We don't check for VMWRITE failures here for performance reasons and
1895 * because they are not expected to fail, barring irrecoverable conditions
1896 * like hardware errors.
1897 */
1898 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1899 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1900 {
1901 uint64_t u64Val;
1902 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1903 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1904 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1905 }
1906
1907 /*
1908 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1909 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1910 */
1911 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1912 {
1913 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1914 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1915 {
1916 uint64_t u64Val;
1917 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1918 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1919 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1920 }
1921 }
1922
1923 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1924 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1925 }
1926
1927 ASMSetFlags(fEFlags);
1928 return rc;
1929}
1930
1931
1932/**
1933 * Copies the shadow VMCS to the nested-guest VMCS.
1934 *
1935 * @returns VBox status code.
1936 * @param pVCpu The cross context virtual CPU structure.
1937 * @param pVmcsInfo The VMCS info. object.
1938 *
1939 * @remarks Called with interrupts disabled.
1940 */
1941static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1942{
1943 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1944 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1945 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1946
1947 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1948 if (RT_SUCCESS(rc))
1949 {
1950 /*
1951 * Copy guest read/write fields from the shadow VMCS.
1952 * Guest read-only fields cannot be modified, so no need to copy them.
1953 *
1954 * We don't check for VMREAD failures here for performance reasons and
1955 * because they are not expected to fail, barring irrecoverable conditions
1956 * like hardware errors.
1957 */
1958 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1959 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1960 {
1961 uint64_t u64Val;
1962 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1963 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1964 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1965 }
1966
1967 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1968 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1969 }
1970 return rc;
1971}
1972
1973
1974/**
1975 * Enables VMCS shadowing for the given VMCS info. object.
1976 *
1977 * @param pVCpu The cross context virtual CPU structure.
1978 * @param pVmcsInfo The VMCS info. object.
1979 *
1980 * @remarks No-long-jump zone!!!
1981 */
1982static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1983{
1984 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1985 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1986 {
1987 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1988 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1989 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1990 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1991 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1992 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
1993 Log4Func(("Enabled\n"));
1994 }
1995}
1996
1997
1998/**
1999 * Disables VMCS shadowing for the given VMCS info. object.
2000 *
2001 * @param pVCpu The cross context virtual CPU structure.
2002 * @param pVmcsInfo The VMCS info. object.
2003 *
2004 * @remarks No-long-jump zone!!!
2005 */
2006static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2007{
2008 /*
2009 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
2010 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
2011 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
2012 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
2013 *
2014 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2015 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2016 */
2017 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2018 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2019 {
2020 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2021 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2022 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2023 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2024 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2025 Log4Func(("Disabled\n"));
2026 }
2027}
2028#endif
2029
2030
2031/**
2032 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2033 *
2034 * The guest FPU state is always pre-loaded, hence we don't need to bother with
2035 * sharing FPU-related CR0 bits between the guest and host.
2036 *
2037 * @returns VBox status code.
2038 * @param pVCpu The cross context virtual CPU structure.
2039 * @param pVmxTransient The VMX-transient structure.
2040 *
2041 * @remarks No-long-jump zone!!!
2042 */
2043static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2044{
2045 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2046 {
2047 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2048 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2049
2050 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2051 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2052 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2053 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2054 else
2055 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
2056
2057 if (!pVmxTransient->fIsNestedGuest)
2058 {
2059 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2060 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2061 uint64_t const u64ShadowCr0 = u64GuestCr0;
2062 Assert(!RT_HI_U32(u64GuestCr0));
2063
2064 /*
2065 * Setup VT-x's view of the guest CR0.
2066 */
2067 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2068 if (VM_IS_VMX_NESTED_PAGING(pVM))
2069 {
2070#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2071 if (CPUMIsGuestPagingEnabled(pVCpu))
2072 {
2073 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2074 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2075 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2076 }
2077 else
2078 {
2079 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2080 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2081 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2082 }
2083
2084 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2085 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2086 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2087#endif
2088 }
2089 else
2090 {
2091 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2092 u64GuestCr0 |= X86_CR0_WP;
2093 }
2094
2095 /*
2096 * Guest FPU bits.
2097 *
2098 * Since we always pre-load the guest FPU before VM-entry, there is no need to track lazy state
2099 * using CR0.TS.
2100 *
2101 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2102 * set on the first CPUs to support VT-x, and there is no mention of it with regards to UX in the VM-entry checks.
2103 */
2104 u64GuestCr0 |= X86_CR0_NE;
2105
2106 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2107 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2108
2109 /*
2110 * Update exception intercepts.
2111 */
2112 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2113#ifndef IN_NEM_DARWIN
2114 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2115 {
2116 Assert(PDMVmmDevHeapIsEnabled(pVM));
2117 Assert(pVM->hm.s.vmx.pRealModeTSS);
2118 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2119 }
2120 else
2121#endif
2122 {
2123 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2124 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2125 if (fInterceptMF)
2126 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2127 }
2128
2129 /* Additional intercepts for debugging, define these yourself explicitly. */
2130#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2131 uXcptBitmap |= 0
2132 | RT_BIT(X86_XCPT_BP)
2133 | RT_BIT(X86_XCPT_DE)
2134 | RT_BIT(X86_XCPT_NM)
2135 | RT_BIT(X86_XCPT_TS)
2136 | RT_BIT(X86_XCPT_UD)
2137 | RT_BIT(X86_XCPT_NP)
2138 | RT_BIT(X86_XCPT_SS)
2139 | RT_BIT(X86_XCPT_GP)
2140 | RT_BIT(X86_XCPT_PF)
2141 | RT_BIT(X86_XCPT_MF)
2142 ;
2143#elif defined(HMVMX_ALWAYS_TRAP_PF)
2144 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2145#endif
2146 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2147 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2148 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2149
2150 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2151 u64GuestCr0 |= fSetCr0;
2152 u64GuestCr0 &= fZapCr0;
2153 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2154
2155 /* Commit the CR0 and related fields to the guest VMCS. */
2156 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2157 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2158 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2159 {
2160 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2161 AssertRC(rc);
2162 }
2163 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2164 {
2165 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2166 AssertRC(rc);
2167 }
2168
2169 /* Update our caches. */
2170 pVmcsInfo->u32ProcCtls = uProcCtls;
2171 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2172
2173 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2174 }
2175 else
2176 {
2177 /*
2178 * With nested-guests, we may have extended the guest/host mask here since we
2179 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2180 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2181 * originally supplied. We must copy those bits from the nested-guest CR0 into
2182 * the nested-guest CR0 read-shadow.
2183 */
2184 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2185 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2186 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2187 Assert(!RT_HI_U32(u64GuestCr0));
2188 Assert(u64GuestCr0 & X86_CR0_NE);
2189
2190 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2191 u64GuestCr0 |= fSetCr0;
2192 u64GuestCr0 &= fZapCr0;
2193 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2194
2195 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2196 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2197 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2198
2199 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2200 }
2201
2202 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2203 }
2204
2205 return VINF_SUCCESS;
2206}
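
/*
 * A minimal sketch of the CR0 fixed-bits handling above (the same pattern is
 * used for CR4 further down): bits set in the fixed-0 MSR value must be 1 and
 * bits clear in the fixed-1 MSR value must be 0 in what we put into the VMCS.
 * The helper name is hypothetical.
 *
 *     static uint64_t vmxExampleApplyCr0FixedBits(uint64_t uGuestCr0, uint64_t fSetCr0, uint64_t fZapCr0)
 *     {
 *         uGuestCr0 |= fSetCr0;                                      // force the must-be-one bits
 *         uGuestCr0 &= fZapCr0;                                      // clear the must-be-zero bits
 *         return uGuestCr0 & ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);   // keep caching enabled, as above
 *     }
 */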
2207
2208
2209/**
2210 * Exports the guest control registers (CR3, CR4) into the guest-state area
2211 * in the VMCS.
2212 *
2213 * @returns VBox strict status code.
2214 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2215 * without unrestricted guest access and the VMMDev is not presently
2216 * mapped (e.g. EFI32).
2217 *
2218 * @param pVCpu The cross context virtual CPU structure.
2219 * @param pVmxTransient The VMX-transient structure.
2220 *
2221 * @remarks No-long-jump zone!!!
2222 */
2223static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2224{
2225 int rc = VINF_SUCCESS;
2226 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2227
2228 /*
2229 * Guest CR2.
2230 * It's always loaded in the assembler code. Nothing to do here.
2231 */
2232
2233 /*
2234 * Guest CR3.
2235 */
2236 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2237 {
2238 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2239
2240 if (VM_IS_VMX_NESTED_PAGING(pVM))
2241 {
2242#ifndef IN_NEM_DARWIN
2243 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2244 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2245
2246 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2247 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2248 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2249 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2250
2251 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2252 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2253 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
2254
2255 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2256 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2257 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2258 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2259 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2260 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2261 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2262
2263 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2264 AssertRC(rc);
2265#endif
2266
2267 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2268 uint64_t u64GuestCr3 = pCtx->cr3;
2269 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2270 || CPUMIsGuestPagingEnabledEx(pCtx))
2271 {
2272 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2273 if (CPUMIsGuestInPAEModeEx(pCtx))
2274 {
2275 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2276 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2277 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2278 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2279 }
2280
2281 /*
2282 * With nested paging, the guest's view of its CR3 is unblemished when the
2283 * guest is using paging, or when we have unrestricted guest execution to
2284 * handle the guest while it's not using paging.
2285 */
2286 }
2287#ifndef IN_NEM_DARWIN
2288 else
2289 {
2290 /*
2291 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2292 * thinks it accesses physical memory directly, we use our identity-mapped
2293 * page table to map guest-linear to guest-physical addresses. EPT takes care
2294 * of translating it to host-physical addresses.
2295 */
2296 RTGCPHYS GCPhys;
2297 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2298
2299 /* We obtain it here every time as the guest could have relocated this PCI region. */
2300 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2301 if (RT_SUCCESS(rc))
2302 { /* likely */ }
2303 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2304 {
2305 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2306 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2307 }
2308 else
2309 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2310
2311 u64GuestCr3 = GCPhys;
2312 }
2313#endif
2314
2315 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2316 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2317 AssertRC(rc);
2318 }
2319 else
2320 {
2321 Assert(!pVmxTransient->fIsNestedGuest);
2322 /* Non-nested paging case, just use the hypervisor's CR3. */
2323 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2324
2325 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2326 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2327 AssertRC(rc);
2328 }
2329
2330 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2331 }
2332
2333 /*
2334 * Guest CR4.
2335 * ASSUMES this is done everytime we get in from ring-3! (XCR0)
2336 */
2337 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2338 {
2339 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2340 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2341
2342 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2343 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2344
2345 /*
2346 * With nested-guests, we may have extended the guest/host mask here (since we
2347 * merged in the outer guest's mask, see vmxHCMergeVmcsNested). This means, the
2348 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2349 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2350 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2351 */
2352 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2353 uint64_t u64GuestCr4 = pCtx->cr4;
2354 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2355 ? pCtx->cr4
2356 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2357 Assert(!RT_HI_U32(u64GuestCr4));
2358
2359#ifndef IN_NEM_DARWIN
2360 /*
2361 * Setup VT-x's view of the guest CR4.
2362 *
2363 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2364 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2365 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2366 *
2367 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2368 */
2369 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2370 {
2371 Assert(pVM->hm.s.vmx.pRealModeTSS);
2372 Assert(PDMVmmDevHeapIsEnabled(pVM));
2373 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2374 }
2375#endif
2376
2377 if (VM_IS_VMX_NESTED_PAGING(pVM))
2378 {
2379 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2380 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2381 {
2382 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2383 u64GuestCr4 |= X86_CR4_PSE;
2384 /* Our identity mapping is a 32-bit page directory. */
2385 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2386 }
2387 /* else use guest CR4.*/
2388 }
2389 else
2390 {
2391 Assert(!pVmxTransient->fIsNestedGuest);
2392
2393 /*
2394 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2395 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2396 */
2397 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2398 {
2399 case PGMMODE_REAL: /* Real-mode. */
2400 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2401 case PGMMODE_32_BIT: /* 32-bit paging. */
2402 {
2403 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2404 break;
2405 }
2406
2407 case PGMMODE_PAE: /* PAE paging. */
2408 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2409 {
2410 u64GuestCr4 |= X86_CR4_PAE;
2411 break;
2412 }
2413
2414 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2415 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2416 {
2417#ifdef VBOX_WITH_64_BITS_GUESTS
2418 /* For our assumption in vmxHCShouldSwapEferMsr. */
2419 Assert(u64GuestCr4 & X86_CR4_PAE);
2420 break;
2421#endif
2422 }
2423 default:
2424 AssertFailed();
2425 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2426 }
2427 }
2428
2429 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2430 u64GuestCr4 |= fSetCr4;
2431 u64GuestCr4 &= fZapCr4;
2432
2433 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2434 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2435 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2436
2437#ifndef IN_NEM_DARWIN
2438 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2439 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2440 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2441 {
2442 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2443 hmR0VmxUpdateStartVmFunction(pVCpu);
2444 }
2445#endif
2446
2447 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2448
2449 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2450 }
2451 return rc;
2452}
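
/*
 * Worked example of the EPTP value constructed in vmxHCExportGuestCR3AndCR4()
 * above (the address is hypothetical): for a 4K-aligned EPT PML4 table at
 * host-physical 0x1234f6000, with write-back memory type (6) in bits 2:0 and a
 * page-walk length of 4 (encoded as 3) in bits 5:3, the EPTP becomes
 *
 *     0x1234f6000 | 6 | (3 << 3) = 0x1234f601e
 *
 * which satisfies the assertions above: bits 5:3 read back as 3 and bits 11:7
 * are zero.
 */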
2453
2454
2455#ifdef VBOX_STRICT
2456/**
2457 * Strict function to validate segment registers.
2458 *
2459 * @param pVCpu The cross context virtual CPU structure.
2460 * @param pVmcsInfo The VMCS info. object.
2461 *
2462 * @remarks Will import guest CR0 on strict builds during validation of
2463 * segments.
2464 */
2465static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2466{
2467 /*
2468 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2469 *
2470 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2471 * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2472 * unusable bit and doesn't change the guest-context value.
2473 */
2474 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2475 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2476 vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2477 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2478 && ( !CPUMIsGuestInRealModeEx(pCtx)
2479 && !CPUMIsGuestInV86ModeEx(pCtx)))
2480 {
2481 /* Protected mode checks */
2482 /* CS */
2483 Assert(pCtx->cs.Attr.n.u1Present);
2484 Assert(!(pCtx->cs.Attr.u & 0xf00));
2485 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2486 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2487 || !(pCtx->cs.Attr.n.u1Granularity));
2488 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2489 || (pCtx->cs.Attr.n.u1Granularity));
2490 /* CS cannot be loaded with NULL in protected mode. */
2491 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2492 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2493 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2494 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2495 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2496 else
2497 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2498 /* SS */
2499 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2500 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2501 if ( !(pCtx->cr0 & X86_CR0_PE)
2502 || pCtx->cs.Attr.n.u4Type == 3)
2503 {
2504 Assert(!pCtx->ss.Attr.n.u2Dpl);
2505 }
2506 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2507 {
2508 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2509 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2510 Assert(pCtx->ss.Attr.n.u1Present);
2511 Assert(!(pCtx->ss.Attr.u & 0xf00));
2512 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2513 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2514 || !(pCtx->ss.Attr.n.u1Granularity));
2515 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2516 || (pCtx->ss.Attr.n.u1Granularity));
2517 }
2518 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2519 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2520 {
2521 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2522 Assert(pCtx->ds.Attr.n.u1Present);
2523 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2524 Assert(!(pCtx->ds.Attr.u & 0xf00));
2525 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2526 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2527 || !(pCtx->ds.Attr.n.u1Granularity));
2528 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2529 || (pCtx->ds.Attr.n.u1Granularity));
2530 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2531 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2532 }
2533 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2534 {
2535 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2536 Assert(pCtx->es.Attr.n.u1Present);
2537 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2538 Assert(!(pCtx->es.Attr.u & 0xf00));
2539 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2540 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2541 || !(pCtx->es.Attr.n.u1Granularity));
2542 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2543 || (pCtx->es.Attr.n.u1Granularity));
2544 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2545 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2546 }
2547 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2548 {
2549 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2550 Assert(pCtx->fs.Attr.n.u1Present);
2551 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2552 Assert(!(pCtx->fs.Attr.u & 0xf00));
2553 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2554 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2555 || !(pCtx->fs.Attr.n.u1Granularity));
2556 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2557 || (pCtx->fs.Attr.n.u1Granularity));
2558 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2559 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2560 }
2561 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2562 {
2563 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2564 Assert(pCtx->gs.Attr.n.u1Present);
2565 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2566 Assert(!(pCtx->gs.Attr.u & 0xf00));
2567 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2568 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2569 || !(pCtx->gs.Attr.n.u1Granularity));
2570 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2571 || (pCtx->gs.Attr.n.u1Granularity));
2572 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2573 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2574 }
2575 /* 64-bit capable CPUs. */
2576 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2577 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2578 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2579 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2580 }
2581 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2582 || ( CPUMIsGuestInRealModeEx(pCtx)
2583 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2584 {
2585 /* Real and v86 mode checks. */
2586 /* vmxHCExportGuestSegReg() writes the modified attributes into the VMCS. We want what we're feeding to VT-x. */
2587 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2588#ifndef IN_NEM_DARWIN
2589 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2590 {
2591 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2592 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2593 }
2594 else
2595#endif
2596 {
2597 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2598 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2599 }
2600
2601 /* CS */
2602 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#RX64 %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2603 Assert(pCtx->cs.u32Limit == 0xffff);
2604 Assert(u32CSAttr == 0xf3);
2605 /* SS */
2606 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2607 Assert(pCtx->ss.u32Limit == 0xffff);
2608 Assert(u32SSAttr == 0xf3);
2609 /* DS */
2610 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2611 Assert(pCtx->ds.u32Limit == 0xffff);
2612 Assert(u32DSAttr == 0xf3);
2613 /* ES */
2614 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2615 Assert(pCtx->es.u32Limit == 0xffff);
2616 Assert(u32ESAttr == 0xf3);
2617 /* FS */
2618 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2619 Assert(pCtx->fs.u32Limit == 0xffff);
2620 Assert(u32FSAttr == 0xf3);
2621 /* GS */
2622 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2623 Assert(pCtx->gs.u32Limit == 0xffff);
2624 Assert(u32GSAttr == 0xf3);
2625 /* 64-bit capable CPUs. */
2626 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2627 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2628 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2629 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2630 }
2631}
2632#endif /* VBOX_STRICT */
2633
2634
2635/**
2636 * Exports a guest segment register into the guest-state area in the VMCS.
2637 *
2638 * @returns VBox status code.
2639 * @param pVCpu The cross context virtual CPU structure.
2640 * @param pVmcsInfo The VMCS info. object.
2641 * @param iSegReg The segment register number (X86_SREG_XXX).
2642 * @param pSelReg Pointer to the segment selector.
2643 *
2644 * @remarks No-long-jump zone!!!
2645 */
2646static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2647{
2648 Assert(iSegReg < X86_SREG_COUNT);
2649
2650 uint32_t u32Access = pSelReg->Attr.u;
2651#ifndef IN_NEM_DARWIN
2652 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2653#endif
2654 {
2655 /*
2656 * The way to differentiate whether this is really a null selector or just a
2657 * selector loaded with 0 in real-mode is by using the segment attributes. A selector
2658 * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
2659 * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure
2660 * that NULL selectors loaded in protected-mode have their attributes set to 0.
2661 */
2662 if (u32Access)
2663 { }
2664 else
2665 u32Access = X86DESCATTR_UNUSABLE;
2666 }
2667#ifndef IN_NEM_DARWIN
2668 else
2669 {
2670 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2671 u32Access = 0xf3;
2672 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2673 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2674 RT_NOREF_PV(pVCpu);
2675 }
2676#else
2677 RT_NOREF(pVmcsInfo);
2678#endif
2679
2680 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2681 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2682 ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2683
2684 /*
2685 * Commit it to the VMCS.
2686 */
2687 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
2688 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
2689 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
2690 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
2691 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2692 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2693 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2694 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2695 return VINF_SUCCESS;
2696}
2697
2698
2699/**
2700 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2701 * area in the VMCS.
2702 *
2703 * @returns VBox status code.
2704 * @param pVCpu The cross context virtual CPU structure.
2705 * @param pVmxTransient The VMX-transient structure.
2706 *
2707 * @remarks Will import guest CR0 on strict builds during validation of
2708 * segments.
2709 * @remarks No-long-jump zone!!!
2710 */
2711static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2712{
2713 int rc = VERR_INTERNAL_ERROR_5;
2714#ifndef IN_NEM_DARWIN
2715 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2716#endif
2717 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2718 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2719#ifndef IN_NEM_DARWIN
2720 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2721#endif
2722
2723 /*
2724 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2725 */
2726 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2727 {
2728 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2729 {
2730 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2731#ifndef IN_NEM_DARWIN
2732 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2733 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2734#endif
2735 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2736 AssertRC(rc);
2737 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2738 }
2739
2740 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2741 {
2742 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2743#ifndef IN_NEM_DARWIN
2744 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2745 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2746#endif
2747 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2748 AssertRC(rc);
2749 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2750 }
2751
2752 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2753 {
2754 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2755#ifndef IN_NEM_DARWIN
2756 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2757 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2758#endif
2759 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2760 AssertRC(rc);
2761 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2762 }
2763
2764 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2765 {
2766 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2767#ifndef IN_NEM_DARWIN
2768 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2769 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2770#endif
2771 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2772 AssertRC(rc);
2773 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2774 }
2775
2776 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2777 {
2778 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2779#ifndef IN_NEM_DARWIN
2780 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2781 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2782#endif
2783 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2784 AssertRC(rc);
2785 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2786 }
2787
2788 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2789 {
2790 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2791#ifndef IN_NEM_DARWIN
2792 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2793 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2794#endif
2795 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2796 AssertRC(rc);
2797 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2798 }
2799
2800#ifdef VBOX_STRICT
2801 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2802#endif
2803 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2804 pCtx->cs.Attr.u));
2805 }
2806
2807 /*
2808 * Guest TR.
2809 */
2810 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2811 {
2812 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2813
2814 /*
2815 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2816 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2817 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2818 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() for how pRealModeTSS is set up.
2819 uint16_t u16Sel;
2820 uint32_t u32Limit;
2821 uint64_t u64Base;
2822 uint32_t u32AccessRights;
2823#ifndef IN_NEM_DARWIN
2824 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2825#endif
2826 {
2827 u16Sel = pCtx->tr.Sel;
2828 u32Limit = pCtx->tr.u32Limit;
2829 u64Base = pCtx->tr.u64Base;
2830 u32AccessRights = pCtx->tr.Attr.u;
2831 }
2832#ifndef IN_NEM_DARWIN
2833 else
2834 {
2835 Assert(!pVmxTransient->fIsNestedGuest);
2836 Assert(pVM->hm.s.vmx.pRealModeTSS);
2837 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2838
2839 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2840 RTGCPHYS GCPhys;
2841 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2842 AssertRCReturn(rc, rc);
2843
2844 X86DESCATTR DescAttr;
2845 DescAttr.u = 0;
2846 DescAttr.n.u1Present = 1;
2847 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2848
2849 u16Sel = 0;
2850 u32Limit = HM_VTX_TSS_SIZE;
2851 u64Base = GCPhys;
2852 u32AccessRights = DescAttr.u;
2853 }
2854#endif
2855
2856 /* Validate. */
2857 Assert(!(u16Sel & RT_BIT(2)));
2858 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2859 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2860 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2861 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2862 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2863 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2864 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2865 Assert( (u32Limit & 0xfff) == 0xfff
2866 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2867 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2868 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2869
2870 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2871 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2872 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2873 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2874
2875 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2876 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2877 }
2878
2879 /*
2880 * Guest GDTR.
2881 */
2882 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2883 {
2884 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2885
2886 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2887 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2888
2889 /* Validate. */
2890 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2891
2892 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2893 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2894 }
2895
2896 /*
2897 * Guest LDTR.
2898 */
2899 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2900 {
2901 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2902
2903 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2904 uint32_t u32Access;
2905 if ( !pVmxTransient->fIsNestedGuest
2906 && !pCtx->ldtr.Attr.u)
2907 u32Access = X86DESCATTR_UNUSABLE;
2908 else
2909 u32Access = pCtx->ldtr.Attr.u;
2910
2911 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2912 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2913 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2914 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2915
2916 /* Validate. */
2917 if (!(u32Access & X86DESCATTR_UNUSABLE))
2918 {
2919 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2920 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2921 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2922 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2923 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2924 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2925 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2926 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2927 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2928 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2929 }
2930
2931 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2932 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2933 }
2934
2935 /*
2936 * Guest IDTR.
2937 */
2938 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2939 {
2940 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2941
2942 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2943 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2944
2945 /* Validate. */
2946 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2947
2948 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2949 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2950 }
2951
2952 return VINF_SUCCESS;
2953}
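/*
 * A minimal sketch of the dirty-flag protocol assumed by the export code above (all names
 * taken from this file, the caller context is assumed): code that changes e.g. the guest
 * GDTR marks it dirty in fCtxChanged, and the next pass through this routine writes the
 * base/limit to the VMCS and clears the bit again:
 *
 *     ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GDTR);
 */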
2954
2955
2956/**
2957 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2958 * VM-exit interruption info type.
2959 *
2960 * @returns The IEM exception flags.
2961 * @param uVector The event vector.
2962 * @param uVmxEventType The VMX event type.
2963 *
2964 * @remarks This function currently only constructs flags required for
2965 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2966 * and CR2 aspects of an exception are not included).
2967 */
2968static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2969{
2970 uint32_t fIemXcptFlags;
2971 switch (uVmxEventType)
2972 {
2973 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2974 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2975 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2976 break;
2977
2978 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2979 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2980 break;
2981
2982 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2983 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2984 break;
2985
2986 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2987 {
2988 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2989 if (uVector == X86_XCPT_BP)
2990 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2991 else if (uVector == X86_XCPT_OF)
2992 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
2993 else
2994 {
2995 fIemXcptFlags = 0;
2996 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
2997 }
2998 break;
2999 }
3000
3001 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
3002 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3003 break;
3004
3005 default:
3006 fIemXcptFlags = 0;
3007 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
3008 break;
3009 }
3010 return fIemXcptFlags;
3011}
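/*
 * Minimal usage sketch (the local variable names here are illustrative, not from this file):
 * the IDT-vectoring information read from the VMCS is decomposed with the macros used
 * elsewhere in this file and fed to the helper above:
 *
 *     uint8_t  const uVector       = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectoringInfo);
 *     uint32_t const uType         = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectoringInfo);
 *     uint32_t const fIemXcptFlags = vmxHCGetIemXcptFlags(uVector, uType);
 *
 * For a software INT3 (software exception, vector X86_XCPT_BP), for instance, this yields
 * IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR.
 */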
3012
3013
3014/**
3015 * Sets an event as a pending event to be injected into the guest.
3016 *
3017 * @param pVCpu The cross context virtual CPU structure.
3018 * @param u32IntInfo The VM-entry interruption-information field.
3019 * @param cbInstr The VM-entry instruction length in bytes (for
3020 * software interrupts, exceptions and privileged
3021 * software exceptions).
3022 * @param u32ErrCode The VM-entry exception error code.
3023 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3024 * page-fault.
3025 */
3026DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3027 RTGCUINTPTR GCPtrFaultAddress)
3028{
3029 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3030 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3031 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3032 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3033 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3034 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3035}
3036
3037
3038/**
3039 * Sets an external interrupt as pending-for-injection into the VM.
3040 *
3041 * @param pVCpu The cross context virtual CPU structure.
3042 * @param u8Interrupt The external interrupt vector.
3043 */
3044DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3045{
3046 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3047 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3048 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3049 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3050 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3051}
3052
3053
3054/**
3055 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3056 *
3057 * @param pVCpu The cross context virtual CPU structure.
3058 */
3059DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3060{
3061 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3062 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3063 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3064 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3065 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3066}
3067
3068
3069/**
3070 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3071 *
3072 * @param pVCpu The cross context virtual CPU structure.
3073 */
3074DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3075{
3076 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3077 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3078 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3079 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3080 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3081}
3082
3083
3084/**
3085 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3086 *
3087 * @param pVCpu The cross context virtual CPU structure.
3088 */
3089DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3090{
3091 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3092 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3093 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3094 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3095 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3096}
3097
3098
3099/**
3100 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3101 *
3102 * @param pVCpu The cross context virtual CPU structure.
3103 */
3104DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3105{
3106 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3107 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3108 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3109 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3110 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3111}
3112
3113
3114#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3115/**
3116 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3117 *
3118 * @param pVCpu The cross context virtual CPU structure.
3119 * @param u32ErrCode The error code for the general-protection exception.
3120 */
3121DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3122{
3123 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3124 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3125 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3126 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3127 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3128}
3129
3130
3131/**
3132 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3133 *
3134 * @param pVCpu The cross context virtual CPU structure.
3135 * @param u32ErrCode The error code for the stack exception.
3136 */
3137DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3138{
3139 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3140 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3141 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3142 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3143 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3144}
3145#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3146
3147
3148/**
3149 * Fixes up attributes for the specified segment register.
3150 *
3151 * @param pVCpu The cross context virtual CPU structure.
3152 * @param pSelReg The segment register that needs fixing.
3153 * @param pszRegName The register name (for logging and assertions).
3154 */
3155static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3156{
3157 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3158
3159 /*
3160 * If VT-x marks the segment as unusable, most other bits remain undefined:
3161 * - For CS the L, D and G bits have meaning.
3162 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3163 * - For the remaining data segments no bits are defined.
3164 *
3165 * The present bit and the unusable bit have been observed to be set at the
3166 * same time (the selector was supposed to be invalid as we started executing
3167 * a V8086 interrupt in ring-0).
3168 *
3169 * What should be important for the rest of the VBox code, is that the P bit is
3170 * cleared. Some of the other VBox code recognizes the unusable bit, but
3171 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3172 * safe side here, we'll strip off P and other bits we don't care about. If
3173 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3174 *
3175 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3176 */
3177#ifdef VBOX_STRICT
3178 uint32_t const uAttr = pSelReg->Attr.u;
3179#endif
3180
3181 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3182 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3183 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3184
3185#ifdef VBOX_STRICT
3186# ifndef IN_NEM_DARWIN
3187 VMMRZCallRing3Disable(pVCpu);
3188# endif
3189 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3190# ifdef DEBUG_bird
3191 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3192 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3193 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3194# endif
3195# ifndef IN_NEM_DARWIN
3196 VMMRZCallRing3Enable(pVCpu);
3197# endif
3198 NOREF(uAttr);
3199#endif
3200 RT_NOREF2(pVCpu, pszRegName);
3201}
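/*
 * Worked example of the attribute stripping above, assuming the usual X86DESCATTR_* bit
 * layout (P = bit 7, UNUSABLE = bit 16): an unusable-but-present read/write data segment
 * with Attr.u = 0x10093 (UNUSABLE | P | S | type 3) is reduced to 0x10013; the present bit
 * is dropped, while UNUSABLE, the descriptor type (S) and the segment type survive the mask.
 */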
3202
3203
3204/**
3205 * Imports a guest segment register from the current VMCS into the guest-CPU
3206 * context.
3207 *
3208 * @param pVCpu The cross context virtual CPU structure.
3209 * @param iSegReg The segment register number (X86_SREG_XXX).
3210 *
3211 * @remarks Called with interrupts and/or preemption disabled.
3212 */
3213static void vmxHCImportGuestSegReg(PVMCPUCC pVCpu, uint32_t iSegReg)
3214{
3215 Assert(iSegReg < X86_SREG_COUNT);
3216 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
3217 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
3218 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
3219 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
3220
3221 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
3222
3223 uint16_t u16Sel;
3224 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), &u16Sel); AssertRC(rc);
3225 pSelReg->Sel = u16Sel;
3226 pSelReg->ValidSel = u16Sel;
3227
3228 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3229 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), &pSelReg->u64Base); AssertRC(rc);
3230
3231 uint32_t u32Attr;
3232 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), &u32Attr); AssertRC(rc);
3233 pSelReg->Attr.u = u32Attr;
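    /* Note: "ES\0CS\0SS\0DS\0FS\0GS" below is a packed name table; iSegReg * 3 indexes the
       two-character register name (plus its terminator) matching the X86_SREG_XXX ordering. */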
3234 if (u32Attr & X86DESCATTR_UNUSABLE)
3235 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + iSegReg * 3);
3236
3237 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3238}
3239
3240
3241/**
3242 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3243 *
3244 * @param pVCpu The cross context virtual CPU structure.
3245 *
3246 * @remarks Called with interrupts and/or preemption disabled.
3247 */
3248static void vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3249{
3250 uint16_t u16Sel;
3251 uint64_t u64Base;
3252 uint32_t u32Limit, u32Attr;
3253 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3254 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3255 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3256 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3257
3258 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3259 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3260 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3261 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3262 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3263 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3264 if (u32Attr & X86DESCATTR_UNUSABLE)
3265 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3266}
3267
3268
3269/**
3270 * Imports the guest TR from the current VMCS into the guest-CPU context.
3271 *
3272 * @param pVCpu The cross context virtual CPU structure.
3273 *
3274 * @remarks Called with interrupts and/or preemption disabled.
3275 */
3276static void vmxHCImportGuestTr(PVMCPUCC pVCpu)
3277{
3278 uint16_t u16Sel;
3279 uint64_t u64Base;
3280 uint32_t u32Limit, u32Attr;
3281 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3282 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3283 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3284 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3285
3286 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3287 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3288 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3289 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3290 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3291 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3292 /* TR is the only selector that can never be unusable. */
3293 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3294}
3295
3296
3297/**
3298 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3299 *
3300 * @param pVCpu The cross context virtual CPU structure.
3301 *
3302 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3303 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3304 * instead!!!
3305 */
3306static void vmxHCImportGuestRip(PVMCPUCC pVCpu)
3307{
3308 uint64_t u64Val;
3309 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3310 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
3311 {
3312 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3313 AssertRC(rc);
3314
3315 pCtx->rip = u64Val;
3316 EMHistoryUpdatePC(pVCpu, pCtx->rip, false);
3317 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
3318 }
3319}
3320
3321
3322/**
3323 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3324 *
3325 * @param pVCpu The cross context virtual CPU structure.
3326 * @param pVmcsInfo The VMCS info. object.
3327 *
3328 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3329 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3330 * instead!!!
3331 */
3332static void vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3333{
3334 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3335 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
3336 {
3337 uint64_t u64Val;
3338 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
3339 AssertRC(rc);
3340
3341 pCtx->rflags.u64 = u64Val;
3342#ifndef IN_NEM_DARWIN
3343 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3344 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
3345 {
3346 pCtx->eflags.Bits.u1VM = 0;
3347 pCtx->eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3348 }
3349#else
3350 RT_NOREF(pVmcsInfo);
3351#endif
3352 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3353 }
3354}
3355
3356
3357/**
3358 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3359 * context.
3360 *
3361 * @param pVCpu The cross context virtual CPU structure.
3362 * @param pVmcsInfo The VMCS info. object.
3363 *
3364 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3365 * do not log!
3366 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3367 * instead!!!
3368 */
3369static void vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3370{
3371 uint32_t u32Val;
3372 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3373 if (!u32Val)
3374 {
3375 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3376 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3377 CPUMSetGuestNmiBlocking(pVCpu, false);
3378 }
3379 else
3380 {
3381 /*
3382 * We must import RIP here to set our EM interrupt-inhibited state.
3383 * We also import RFLAGS as our code that evaluates pending interrupts
3384 * before VM-entry requires it.
3385 */
3386 vmxHCImportGuestRip(pVCpu);
3387 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3388
3389 if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3390 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
3391 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3392 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3393
3394 bool const fNmiBlocking = RT_BOOL(u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
3395 CPUMSetGuestNmiBlocking(pVCpu, fNmiBlocking);
3396 }
3397}
3398
3399
3400/**
3401 * Worker for VMXR0ImportStateOnDemand.
3402 *
3403 * @returns VBox status code.
3404 * @param pVCpu The cross context virtual CPU structure.
3405 * @param pVmcsInfo The VMCS info. object.
3406 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3407 */
3408static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3409{
3410 int rc = VINF_SUCCESS;
3411 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3412 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3413 uint32_t u32Val;
3414
3415 /*
3416 * Note! This is a hack to work around a mysterious BSOD observed with release builds
3417 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3418 * neither are other host platforms.
3419 *
3420 * Committing this temporarily as it prevents BSOD.
3421 *
3422 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3423 */
3424# ifdef RT_OS_WINDOWS
3425 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3426 return VERR_HM_IPE_1;
3427# endif
3428
3429 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3430
3431#ifndef IN_NEM_DARWIN
3432 /*
3433 * We disable interrupts to make the updating of the state and in particular
3434 * the fExtrn modification atomic wrt preemption hooks.
3435 */
3436 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3437#endif
3438
3439 fWhat &= pCtx->fExtrn;
3440 if (fWhat)
3441 {
3442 do
3443 {
3444 if (fWhat & CPUMCTX_EXTRN_RIP)
3445 vmxHCImportGuestRip(pVCpu);
3446
3447 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3448 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3449
3450 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3451 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3452
3453 if (fWhat & CPUMCTX_EXTRN_RSP)
3454 {
3455 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3456 AssertRC(rc);
3457 }
3458
3459 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3460 {
3461 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3462#ifndef IN_NEM_DARWIN
3463 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3464#else
3465 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3466#endif
3467 if (fWhat & CPUMCTX_EXTRN_CS)
3468 {
3469 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
3470 vmxHCImportGuestRip(pVCpu);
3471 if (fRealOnV86Active)
3472 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3473 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3474 }
3475 if (fWhat & CPUMCTX_EXTRN_SS)
3476 {
3477 vmxHCImportGuestSegReg(pVCpu, X86_SREG_SS);
3478 if (fRealOnV86Active)
3479 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3480 }
3481 if (fWhat & CPUMCTX_EXTRN_DS)
3482 {
3483 vmxHCImportGuestSegReg(pVCpu, X86_SREG_DS);
3484 if (fRealOnV86Active)
3485 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3486 }
3487 if (fWhat & CPUMCTX_EXTRN_ES)
3488 {
3489 vmxHCImportGuestSegReg(pVCpu, X86_SREG_ES);
3490 if (fRealOnV86Active)
3491 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3492 }
3493 if (fWhat & CPUMCTX_EXTRN_FS)
3494 {
3495 vmxHCImportGuestSegReg(pVCpu, X86_SREG_FS);
3496 if (fRealOnV86Active)
3497 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3498 }
3499 if (fWhat & CPUMCTX_EXTRN_GS)
3500 {
3501 vmxHCImportGuestSegReg(pVCpu, X86_SREG_GS);
3502 if (fRealOnV86Active)
3503 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3504 }
3505 }
3506
3507 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3508 {
3509 if (fWhat & CPUMCTX_EXTRN_LDTR)
3510 vmxHCImportGuestLdtr(pVCpu);
3511
3512 if (fWhat & CPUMCTX_EXTRN_GDTR)
3513 {
3514 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3515 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3516 pCtx->gdtr.cbGdt = u32Val;
3517 }
3518
3519 /* Guest IDTR. */
3520 if (fWhat & CPUMCTX_EXTRN_IDTR)
3521 {
3522 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3523 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3524 pCtx->idtr.cbIdt = u32Val;
3525 }
3526
3527 /* Guest TR. */
3528 if (fWhat & CPUMCTX_EXTRN_TR)
3529 {
3530#ifndef IN_NEM_DARWIN
3531 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3532 so we don't need to import that one. */
3533 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3534#endif
3535 vmxHCImportGuestTr(pVCpu);
3536 }
3537 }
3538
3539 if (fWhat & CPUMCTX_EXTRN_DR7)
3540 {
3541#ifndef IN_NEM_DARWIN
3542 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3543#endif
3544 {
3545 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3546 AssertRC(rc);
3547 }
3548 }
3549
3550 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3551 {
3552 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3553 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3554 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3555 pCtx->SysEnter.cs = u32Val;
3556 }
3557
3558#ifndef IN_NEM_DARWIN
3559 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3560 {
3561 if ( pVM->hmr0.s.fAllow64BitGuests
3562 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3563 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3564 }
3565
3566 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3567 {
3568 if ( pVM->hmr0.s.fAllow64BitGuests
3569 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3570 {
3571 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3572 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3573 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3574 }
3575 }
3576
3577 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3578 {
3579 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3580 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3581 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3582 Assert(pMsrs);
3583 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3584 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3585 for (uint32_t i = 0; i < cMsrs; i++)
3586 {
3587 uint32_t const idMsr = pMsrs[i].u32Msr;
3588 switch (idMsr)
3589 {
3590 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3591 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3592 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3593 default:
3594 {
3595 uint32_t idxLbrMsr;
3596 if (VM_IS_VMX_LBR(pVM))
3597 {
3598 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3599 {
3600 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3601 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3602 break;
3603 }
3604 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3605 {
3606 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3607 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3608 break;
3609 }
3610 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3611 {
3612 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3613 break;
3614 }
3615 /* Fallthru (no break) */
3616 }
3617 pCtx->fExtrn = 0;
3618 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3619 ASMSetFlags(fEFlags);
3620 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3621 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3622 }
3623 }
3624 }
3625 }
3626#endif
3627
3628 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3629 {
3630 if (fWhat & CPUMCTX_EXTRN_CR0)
3631 {
3632 uint64_t u64Cr0;
3633 uint64_t u64Shadow;
3634 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3635 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3636#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3637 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3638 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3639#else
3640 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3641 {
3642 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3643 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3644 }
3645 else
3646 {
3647 /*
3648 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3649 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3650 * re-construct CR0. See @bugref{9180#c95} for details.
3651 */
3652 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3653 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3654 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3655 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3656 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3657 }
3658#endif
3659#ifndef IN_NEM_DARWIN
3660 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3661#endif
3662 CPUMSetGuestCR0(pVCpu, u64Cr0);
3663#ifndef IN_NEM_DARWIN
3664 VMMRZCallRing3Enable(pVCpu);
3665#endif
3666 }
3667
3668 if (fWhat & CPUMCTX_EXTRN_CR4)
3669 {
3670 uint64_t u64Cr4;
3671 uint64_t u64Shadow;
3672 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3673 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3674#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3675 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3676 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3677#else
3678 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3679 {
3680 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3681 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3682 }
3683 else
3684 {
3685 /*
3686 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3687 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3688 * re-construct CR4. See @bugref{9180#c95} for details.
3689 */
3690 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3691 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3692 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3693 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3694 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3695 }
3696#endif
3697 pCtx->cr4 = u64Cr4;
3698 }
3699
3700 if (fWhat & CPUMCTX_EXTRN_CR3)
3701 {
3702 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3703 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3704 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3705 && CPUMIsGuestPagingEnabledEx(pCtx)))
3706 {
3707 uint64_t u64Cr3;
3708 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3709 if (pCtx->cr3 != u64Cr3)
3710 {
3711 pCtx->cr3 = u64Cr3;
3712 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3713 }
3714
3715 /*
3716 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3717 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3718 */
3719 if (CPUMIsGuestInPAEModeEx(pCtx))
3720 {
3721 X86PDPE aPaePdpes[4];
3722 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3723 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3724 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3725 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3726 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3727 {
3728 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3729 /* PGM now updates PAE PDPTEs while updating CR3. */
3730 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3731 }
3732 }
3733 }
3734 }
3735 }
3736
3737#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3738 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3739 {
3740 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3741 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3742 {
3743 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3744 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3745 if (RT_SUCCESS(rc))
3746 { /* likely */ }
3747 else
3748 break;
3749 }
3750 }
3751#endif
3752 } while (0);
3753
3754 if (RT_SUCCESS(rc))
3755 {
3756 /* Update fExtrn. */
3757 pCtx->fExtrn &= ~fWhat;
3758
3759 /* If everything has been imported, clear the HM keeper bit. */
3760 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3761 {
3762#ifndef IN_NEM_DARWIN
3763 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3764#else
3765 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3766#endif
3767 Assert(!pCtx->fExtrn);
3768 }
3769 }
3770 }
3771#ifndef IN_NEM_DARWIN
3772 else
3773 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3774
3775 /*
3776 * Restore interrupts.
3777 */
3778 ASMSetFlags(fEFlags);
3779#endif
3780
3781 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3782
3783 if (RT_SUCCESS(rc))
3784 { /* likely */ }
3785 else
3786 return rc;
3787
3788 /*
3789 * Honor any pending CR3 updates.
3790 *
3791 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3792 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3793 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3794 *
3795 * The reason for such complicated handling is that VM-exits that call into PGM expect CR3 to be up-to-date and thus
3796 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3797 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3798 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3799 *
3800 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3801 *
3802 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3803 */
3804 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3805#ifndef IN_NEM_DARWIN
3806 && VMMRZCallRing3IsEnabled(pVCpu)
3807#endif
3808 )
3809 {
3810 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3811 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3812 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3813 }
3814
3815 return VINF_SUCCESS;
3816}
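/*
 * Minimal usage sketch (assumed caller context, e.g. a VM-exit handler that only needs the
 * instruction pointer and flags before handing the instruction to IEM):
 *
 *     int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
 *     AssertRCReturn(rc, rc);
 *
 * Only state still marked external in pCtx->fExtrn is actually read back from the VMCS; bits
 * that were already imported are skipped by the "fWhat &= pCtx->fExtrn" filtering above.
 */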
3817
3818
3819/**
3820 * Check per-VM and per-VCPU force flag actions that require us to go back to
3821 * ring-3 for one reason or another.
3822 *
3823 * @returns Strict VBox status code (i.e. informational status codes too)
3824 * @retval VINF_SUCCESS if we don't have any actions that require going back to
3825 * ring-3.
3826 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
3827 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
3828 * interrupts)
3829 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
3830 * all EMTs to be in ring-3.
3831 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
3832 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
3833 * to the EM loop.
3834 *
3835 * @param pVCpu The cross context virtual CPU structure.
3836 * @param fIsNestedGuest Flag whether this is for a pending nested guest event.
3837 * @param fStepping Whether we are single-stepping the guest using the
3838 * hypervisor debugger.
3839 *
3840 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
3841 * is no longer in VMX non-root mode.
3842 */
3843static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
3844{
3845#ifndef IN_NEM_DARWIN
3846 Assert(VMMRZCallRing3IsEnabled(pVCpu));
3847#endif
3848
3849 /*
3850 * Update pending interrupts into the APIC's IRR.
3851 */
3852 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3853 APICUpdatePendingInterrupts(pVCpu);
3854
3855 /*
3856 * Anything pending? Should be more likely than not if we're doing a good job.
3857 */
3858 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3859 if ( !fStepping
3860 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
3861 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
3862 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
3863 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
3864 return VINF_SUCCESS;
3865
3866 /* Pending PGM CR3 sync. */
3867 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
3868 {
3869 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3870 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
3871 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
3872 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
3873 if (rcStrict != VINF_SUCCESS)
3874 {
3875 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
3876 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
3877 return rcStrict;
3878 }
3879 }
3880
3881 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
3882 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
3883 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
3884 {
3885 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
3886 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
3887 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
3888 return rc;
3889 }
3890
3891 /* Pending VM request packets, such as hardware interrupts. */
3892 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
3893 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
3894 {
3895 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
3896 Log4Func(("Pending VM request forcing us back to ring-3\n"));
3897 return VINF_EM_PENDING_REQUEST;
3898 }
3899
3900 /* Pending PGM pool flushes. */
3901 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
3902 {
3903 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
3904 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
3905 return VINF_PGM_POOL_FLUSH_PENDING;
3906 }
3907
3908 /* Pending DMA requests. */
3909 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
3910 {
3911 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
3912 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
3913 return VINF_EM_RAW_TO_R3;
3914 }
3915
3916#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3917 /*
3918 * Pending nested-guest events.
3919 *
3920 * Please note that the priority of these events is specified and important.
3921 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
3922 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
3923 */
3924 if (fIsNestedGuest)
3925 {
3926 /* Pending nested-guest APIC-write. */
3927 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
3928 {
3929 Log4Func(("Pending nested-guest APIC-write\n"));
3930 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
3931 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3932 return rcStrict;
3933 }
3934
3935 /* Pending nested-guest monitor-trap flag (MTF). */
3936 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
3937 {
3938 Log4Func(("Pending nested-guest MTF\n"));
3939 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
3940 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3941 return rcStrict;
3942 }
3943
3944 /* Pending nested-guest VMX-preemption timer expired. */
3945 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
3946 {
3947 Log4Func(("Pending nested-guest preempt timer\n"));
3948 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
3949 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3950 return rcStrict;
3951 }
3952 }
3953#else
3954 NOREF(fIsNestedGuest);
3955#endif
3956
3957 return VINF_SUCCESS;
3958}
3959
3960
3961/**
3962 * Converts any TRPM trap into a pending HM event. This is typically used when
3963 * entering from ring-3 (not longjmp returns).
3964 *
3965 * @param pVCpu The cross context virtual CPU structure.
3966 */
3967static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
3968{
3969 Assert(TRPMHasTrap(pVCpu));
3970 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3971
3972 uint8_t uVector;
3973 TRPMEVENT enmTrpmEvent;
3974 uint32_t uErrCode;
3975 RTGCUINTPTR GCPtrFaultAddress;
3976 uint8_t cbInstr;
3977 bool fIcebp;
3978
3979 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
3980 AssertRC(rc);
3981
3982 uint32_t u32IntInfo;
3983 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
3984 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
3985
3986 rc = TRPMResetTrap(pVCpu);
3987 AssertRC(rc);
3988 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
3989 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
3990
3991 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
3992}
3993
3994
3995/**
3996 * Converts the pending HM event into a TRPM trap.
3997 *
3998 * @param pVCpu The cross context virtual CPU structure.
3999 */
4000static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4001{
4002 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4003
4004 /* If a trap was already pending, we did something wrong! */
4005 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4006
4007 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4008 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4009 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4010
4011 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4012
4013 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4014 AssertRC(rc);
4015
4016 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4017 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4018
4019 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4020 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4021 else
4022 {
4023 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4024 switch (uVectorType)
4025 {
4026 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4027 TRPMSetTrapDueToIcebp(pVCpu);
4028 RT_FALL_THRU();
4029 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4030 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4031 {
4032 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4033 || ( uVector == X86_XCPT_BP /* INT3 */
4034 || uVector == X86_XCPT_OF /* INTO */
4035 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4036 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4037 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4038 break;
4039 }
4040 }
4041 }
4042
4043 /* We're now done converting the pending event. */
4044 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4045}
4046
4047
4048/**
4049 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4050 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4051 *
4052 * @param pVCpu The cross context virtual CPU structure.
4053 * @param pVmcsInfo The VMCS info. object.
4054 */
4055static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4056{
4057 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4058 {
4059 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4060 {
4061 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4062 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4063 AssertRC(rc);
4064 }
4065 } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4066}
4067
4068
4069/**
4070 * Clears the interrupt-window exiting control in the VMCS.
4071 *
4072 * @param pVCpu The cross context virtual CPU structure.
4073 * @param pVmcsInfo The VMCS info. object.
4074 */
4075DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4076{
4077 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4078 {
4079 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4080 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4081 AssertRC(rc);
4082 }
4083}
4084
4085
4086/**
4087 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4088 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4089 *
4090 * @param pVCpu The cross context virtual CPU structure.
4091 * @param pVmcsInfo The VMCS info. object.
4092 */
4093static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4094{
4095 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4096 {
4097 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4098 {
4099 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4100 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4101 AssertRC(rc);
4102 Log4Func(("Setup NMI-window exiting\n"));
4103 }
4104 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4105}
4106
4107
4108/**
4109 * Clears the NMI-window exiting control in the VMCS.
4110 *
4111 * @param pVCpu The cross context virtual CPU structure.
4112 * @param pVmcsInfo The VMCS info. object.
4113 */
4114DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4115{
4116 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4117 {
4118 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4119 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4120 AssertRC(rc);
4121 }
4122}
4123
4124
4125/**
4126 * Injects an event into the guest upon VM-entry by updating the relevant fields
4127 * in the VM-entry area in the VMCS.
4128 *
4129 * @returns Strict VBox status code (i.e. informational status codes too).
4130 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4131 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4132 *
4133 * @param pVCpu The cross context virtual CPU structure.
4134 * @param pVmcsInfo The VMCS info object.
4135 * @param fIsNestedGuest Flag whether this is for a pending nested guest event.
4136 * @param pEvent The event being injected.
4137 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4138 * will be updated if necessary. This cannot be NULL.
4139 * @param fStepping Whether we're single-stepping guest execution and should
4140 * return VINF_EM_DBG_STEPPED if the event is injected
4141 * directly (registers modified by us, not by hardware on
4142 * VM-entry).
4143 */
4144static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4145 bool fStepping, uint32_t *pfIntrState)
4146{
4147 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4148 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4149 Assert(pfIntrState);
4150
4151#ifdef IN_NEM_DARWIN
4152 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4153#endif
4154
4155 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4156 uint32_t u32IntInfo = pEvent->u64IntInfo;
4157 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4158 uint32_t const cbInstr = pEvent->cbInstr;
4159 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4160 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4161 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4162
4163#ifdef VBOX_STRICT
4164 /*
4165 * Validate the error-code-valid bit for hardware exceptions.
4166 * No error codes for exceptions in real-mode.
4167 *
4168 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4169 */
4170 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4171 && !CPUMIsGuestInRealModeEx(pCtx))
4172 {
4173 switch (uVector)
4174 {
4175 case X86_XCPT_PF:
4176 case X86_XCPT_DF:
4177 case X86_XCPT_TS:
4178 case X86_XCPT_NP:
4179 case X86_XCPT_SS:
4180 case X86_XCPT_GP:
4181 case X86_XCPT_AC:
4182 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4183 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4184 RT_FALL_THRU();
4185 default:
4186 break;
4187 }
4188 }
4189
4190 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4191 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4192 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4193#endif
4194
4195 RT_NOREF(uVector);
4196 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4197 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4198 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4199 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4200 {
4201 Assert(uVector <= X86_XCPT_LAST);
4202 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4203 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4204 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4205 }
4206 else
4207 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4208
4209 /*
4210 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4211 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4212 * interrupt handler in the (real-mode) guest.
4213 *
4214 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4215 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4216 */
4217 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4218 {
4219#ifndef IN_NEM_DARWIN
4220 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4221#endif
4222 {
4223 /*
4224 * For CPUs with unrestricted guest execution enabled and with the guest
4225 * in real-mode, we must not set the deliver-error-code bit.
4226 *
4227 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4228 */
4229 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4230 }
4231#ifndef IN_NEM_DARWIN
4232 else
4233 {
4234 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4235 Assert(PDMVmmDevHeapIsEnabled(pVM));
4236 Assert(pVM->hm.s.vmx.pRealModeTSS);
4237 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4238
4239 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4240 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4241 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4242 AssertRCReturn(rc2, rc2);
4243
4244 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4245 size_t const cbIdtEntry = sizeof(X86IDTR16);
4246 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4247 {
4248 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4249 if (uVector == X86_XCPT_DF)
4250 return VINF_EM_RESET;
4251
4252 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4253 No error codes for exceptions in real-mode. */
4254 if (uVector == X86_XCPT_GP)
4255 {
4256 uint32_t const uXcptDfInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4257 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4258 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4259 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4260 HMEVENT EventXcptDf;
4261 RT_ZERO(EventXcptDf);
4262 EventXcptDf.u64IntInfo = uXcptDfInfo;
4263 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptDf, fStepping, pfIntrState);
4264 }
4265
4266 /*
4267 * If we're injecting an event with no valid IDT entry, inject a #GP.
4268 * No error codes for exceptions in real-mode.
4269 *
4270 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4271 */
4272 uint32_t const uXcptGpInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4273 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4274 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4275 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4276 HMEVENT EventXcptGp;
4277 RT_ZERO(EventXcptGp);
4278 EventXcptGp.u64IntInfo = uXcptGpInfo;
4279 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptGp, fStepping, pfIntrState);
4280 }
4281
4282 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4283 uint16_t uGuestIp = pCtx->ip;
4284 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4285 {
4286 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4287 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
4288 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4289 }
4290 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4291 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4292
4293 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4294 X86IDTR16 IdtEntry;
4295 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4296 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4297 AssertRCReturn(rc2, rc2);
4298
4299 /* Construct the stack frame for the interrupt/exception handler. */
4300 VBOXSTRICTRC rcStrict;
4301 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
4302 if (rcStrict == VINF_SUCCESS)
4303 {
4304 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4305 if (rcStrict == VINF_SUCCESS)
4306 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4307 }
4308
4309 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4310 if (rcStrict == VINF_SUCCESS)
4311 {
4312 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4313 pCtx->rip = IdtEntry.offSel;
4314 pCtx->cs.Sel = IdtEntry.uSel;
4315 pCtx->cs.ValidSel = IdtEntry.uSel;
4316 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4317 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4318 && uVector == X86_XCPT_PF)
4319 pCtx->cr2 = GCPtrFault;
4320
4321 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4322 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4323 | HM_CHANGED_GUEST_RSP);
4324
4325 /*
4326 * If we delivered a hardware exception (other than an NMI) and if there was
4327 * block-by-STI in effect, we should clear it.
4328 */
4329 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4330 {
4331 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4332 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4333 Log4Func(("Clearing inhibition due to STI\n"));
4334 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4335 }
4336
4337 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4338 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4339
4340 /*
4341 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4342 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4343 */
4344 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4345
4346 /*
4347 * If we eventually support nested-guest execution without unrestricted guest execution,
4348 * we should set fInterceptEvents here.
4349 */
4350 Assert(!fIsNestedGuest);
4351
4352 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4353 if (fStepping)
4354 rcStrict = VINF_EM_DBG_STEPPED;
4355 }
4356 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4357 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4358 return rcStrict;
4359 }
4360#else
4361 RT_NOREF(pVmcsInfo);
4362#endif
4363 }
4364
4365 /*
4366 * Validate.
4367 */
4368 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4369 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4370
4371 /*
4372 * Inject the event into the VMCS.
4373 */
4374 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4375 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4376 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4377 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4378 AssertRC(rc);
4379
4380 /*
4381 * Update guest CR2 if this is a page-fault.
4382 */
4383 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4384 pCtx->cr2 = GCPtrFault;
4385
4386 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4387 return VINF_SUCCESS;
4388}
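/*
 * Worked example of the VM-entry fields written above (encoding shown for illustration,
 * following the interruption-information layout used throughout this file): injecting a
 * \#GP with an error code gives u32IntInfo = 0x80000b0d (valid bit 31, error-code-valid
 * bit 11, type 3 = hardware exception in bits 10:8, vector 0x0d), so the code writes:
 *
 *     VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO  = 0x80000b0d
 *     VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE  = u32ErrCode
 *     VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH       = cbInstr (0 for hardware exceptions)
 */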
4389
4390
4391/**
4392 * Evaluates the event to be delivered to the guest and sets it as the pending
4393 * event.
4394 *
4395 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4396 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4397 * NOT restore these force-flags.
4398 *
4399 * @returns Strict VBox status code (i.e. informational status codes too).
4400 * @param pVCpu The cross context virtual CPU structure.
4401 * @param pVmcsInfo The VMCS information structure.
4402 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
4403 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4404 */
4405static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4406{
4407 Assert(pfIntrState);
4408 Assert(!TRPMHasTrap(pVCpu));
4409
4410 /*
4411 * Compute/update guest-interruptibility state related FFs.
4412 * The FFs will be used below while evaluating events to be injected.
4413 */
4414 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4415
4416 /*
4417 * Evaluate if a new event needs to be injected.
4418 * An event that's already pending has already performed all necessary checks.
4419 */
4420 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4421 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
4422 {
4423 /** @todo SMI. SMIs take priority over NMIs. */
4424
4425 /*
4426 * NMIs.
4427 * NMIs take priority over external interrupts.
4428 */
4429#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4430 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4431#endif
4432 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4433 {
4434 /*
4435 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4436 *
4437 * For a nested-guest, the FF always indicates the outer guest's ability to
4438 * receive an NMI while the guest-interruptibility state bit depends on whether
4439 * the nested-hypervisor is using virtual-NMIs.
4440 */
4441 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
4442 {
4443#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4444 if ( fIsNestedGuest
4445 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4446 return IEMExecVmxVmexitXcptNmi(pVCpu);
4447#endif
4448 vmxHCSetPendingXcptNmi(pVCpu);
4449 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4450 Log4Func(("NMI pending injection\n"));
4451
4452 /* We've injected the NMI, bail. */
4453 return VINF_SUCCESS;
4454 }
4455 else if (!fIsNestedGuest)
4456 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4457 }
4458
4459 /*
4460 * External interrupts (PIC/APIC).
4461 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4462 * We cannot re-request the interrupt from the controller again.
4463 */
4464 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4465 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4466 {
4467 Assert(!DBGFIsStepping(pVCpu));
4468 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4469 AssertRC(rc);
4470
4471 /*
4472 * We must not check EFLAGS directly when executing a nested-guest, use
4473 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
4474 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4475 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4476 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4477 *
4478 * See Intel spec. 25.4.1 "Event Blocking".
4479 */
4480 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4481 {
4482#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4483 if ( fIsNestedGuest
4484 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4485 {
4486 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4487 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4488 return rcStrict;
4489 }
4490#endif
4491 uint8_t u8Interrupt;
4492 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4493 if (RT_SUCCESS(rc))
4494 {
4495#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4496 if ( fIsNestedGuest
4497 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4498 {
4499 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
4500 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4501 return rcStrict;
4502 }
4503#endif
4504 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4505 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
4506 }
4507 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4508 {
4509 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
4510
4511 if ( !fIsNestedGuest
4512 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
4513 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
4514 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
4515
4516 /*
4517 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4518 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4519 * need to re-set this force-flag here.
4520 */
4521 }
4522 else
4523 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4524
4525 /* We've injected the interrupt or taken necessary action, bail. */
4526 return VINF_SUCCESS;
4527 }
4528 if (!fIsNestedGuest)
4529 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4530 }
4531 }
4532 else if (!fIsNestedGuest)
4533 {
4534 /*
4535 * An event is being injected or we are in an interrupt shadow. Check if another event is
4536 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
4537 * the pending event.
4538 */
4539 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4540 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4541 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4542 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4543 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4544 }
4545 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
4546
4547 return VINF_SUCCESS;
4548}
4549
4550
4551/**
4552 * Injects any pending events into the guest if the guest is in a state to
4553 * receive them.
4554 *
4555 * @returns Strict VBox status code (i.e. informational status codes too).
4556 * @param pVCpu The cross context virtual CPU structure.
4557 * @param pVmcsInfo The VMCS information structure.
4558 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
4559 * @param fIntrState The VT-x guest-interruptibility state.
4560 * @param fStepping Whether we are single-stepping the guest using the
4561 * hypervisor debugger and should return
4562 * VINF_EM_DBG_STEPPED if the event was dispatched
4563 * directly.
4564 */
4565static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
4566 uint32_t fIntrState, bool fStepping)
4567{
4568 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
4569#ifndef IN_NEM_DARWIN
4570 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4571#endif
4572
4573#ifdef VBOX_STRICT
4574 /*
4575 * Verify guest-interruptibility state.
4576 *
4577 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
4578 * since injecting an event may modify the interruptibility state and we must thus always
4579 * use fIntrState.
4580 */
4581 {
4582 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
4583 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
4584 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
4585 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
4586 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet. */
4587 Assert(!TRPMHasTrap(pVCpu));
4588 NOREF(fBlockMovSS); NOREF(fBlockSti);
4589 }
4590#endif
4591
4592 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4593 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
4594 {
4595 /*
4596 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
4597 * pending even while injecting an event and in this case, we want a VM-exit as soon as
4598 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
4599 *
4600 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
4601 */
4602 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
4603#ifdef VBOX_STRICT
4604 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4605 {
4606 Assert(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
4607 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4608 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4609 }
4610 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
4611 {
4612 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
4613 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4614 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4615 }
4616#endif
4617 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
4618 uIntType));
4619
4620 /*
4621 * Inject the event and get any changes to the guest-interruptibility state.
4622 *
4623 * The guest-interruptibility state may need to be updated if we inject the event
4624 * into the guest IDT ourselves (for real-on-v86 guests injecting software interrupts).
4625 */
4626 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
4627 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
4628
4629 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4630 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
4631 else
4632 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
4633 }
4634
4635 /*
4636 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
4637 * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
4638 */
4639 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
4640 && !fIsNestedGuest)
4641 {
4642 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
4643
4644 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4645 {
4646 /*
4647 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
4648 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
4649 */
4650 Assert(!DBGFIsStepping(pVCpu));
4651 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_TF);
4652 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
4653 AssertRC(rc);
4654 }
4655 else
4656 {
4657 /*
4658 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
4659 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
4660 * we take care of this case in vmxHCExportSharedDebugState, as well as the case
4661 * where we use MTF, so just make sure it's called before executing guest code.
4662 */
4663 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
4664 }
4665 }
4666 /* else: for nested-guests, this is currently handled while merging VMCS controls. */
4667
4668 /*
4669 * Finally, update the guest-interruptibility state.
4670 *
4671 * This is required for the real-on-v86 software interrupt injection, for
4672 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
4673 */
4674 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
4675 AssertRC(rc);
4676
4677 /*
4678 * There's no need to clear the VM-entry interruption-information field here if we're not
4679 * injecting anything. VT-x clears the valid bit on every VM-exit.
4680 *
4681 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
4682 */
4683
4684 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
4685 return rcStrict;
4686}
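
/**
 * Minimal caller-side sketch of how the two routines above are typically
 * paired in the pre-run path.  This assumes a run-loop context where pVCpu,
 * pVmcsInfo and fStepping are already in scope; the real pre-run code
 * additionally deals with nested-guests, TRPM trap conversion and longjmp
 * protection.
 *
 * @code
 *     uint32_t     fIntrState = 0;
 *     VBOXSTRICTRC rcStrict   = vmxHCEvaluatePendingEvent(pVCpu, pVmcsInfo, false, &fIntrState);  // not a nested-guest
 *     if (rcStrict == VINF_SUCCESS)
 *         rcStrict = vmxHCInjectPendingEvent(pVCpu, pVmcsInfo, false, fIntrState, fStepping);
 *     // VINF_EM_DBG_STEPPED, VINF_EM_RESET and other informational codes are propagated to the caller.
 * @endcode
 */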
4687
4688
4689/**
4690 * Tries to determine what part of the guest-state VT-x has deemed as invalid
4691 * and update error record fields accordingly.
4692 *
4693 * @returns VMX_IGS_* error codes.
4694 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
4695 * wrong with the guest state.
4696 *
4697 * @param pVCpu The cross context virtual CPU structure.
4698 * @param pVmcsInfo The VMCS info. object.
4699 *
4700 * @remarks This function assumes our cache of the VMCS controls
4701 * are valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
4702 */
4703static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
4704{
4705#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
4706#define HMVMX_CHECK_BREAK(expr, err) do { \
4707 if (!(expr)) { uError = (err); break; } \
4708 } while (0)
4709
4710 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4711 uint32_t uError = VMX_IGS_ERROR;
4712 uint32_t u32IntrState = 0;
4713#ifndef IN_NEM_DARWIN
4714 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4715 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
4716#else
4717 bool const fUnrestrictedGuest = true;
4718#endif
4719 do
4720 {
4721 int rc;
4722
4723 /*
4724 * Guest-interruptibility state.
4725 *
4726 * Read this first so that, even if a check that doesn't actually require the
4727 * guest-interruptibility state fails early, the value recorded for it still
4728 * reflects the correct VMCS contents and avoids causing further confusion.
4729 */
4730 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
4731 AssertRC(rc);
4732
4733 uint32_t u32Val;
4734 uint64_t u64Val;
4735
4736 /*
4737 * CR0.
4738 */
4739 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4740 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
4741 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
4742 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
4743 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
4744 if (fUnrestrictedGuest)
4745 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
4746
4747 uint64_t u64GuestCr0;
4748 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
4749 AssertRC(rc);
4750 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
4751 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
4752 if ( !fUnrestrictedGuest
4753 && (u64GuestCr0 & X86_CR0_PG)
4754 && !(u64GuestCr0 & X86_CR0_PE))
4755 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
4756
4757 /*
4758 * CR4.
4759 */
4760 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4761 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
4762 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
4763
4764 uint64_t u64GuestCr4;
4765 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
4766 AssertRC(rc);
4767 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
4768 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
4769
4770 /*
4771 * IA32_DEBUGCTL MSR.
4772 */
4773 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
4774 AssertRC(rc);
4775 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4776 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
4777 {
4778 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
4779 }
4780 uint64_t u64DebugCtlMsr = u64Val;
4781
4782#ifdef VBOX_STRICT
4783 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
4784 AssertRC(rc);
4785 Assert(u32Val == pVmcsInfo->u32EntryCtls);
4786#endif
4787 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4788
4789 /*
4790 * RIP and RFLAGS.
4791 */
4792 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
4793 AssertRC(rc);
4794 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
4795 if ( !fLongModeGuest
4796 || !pCtx->cs.Attr.n.u1Long)
4797 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
4798 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
4799 * must be identical if the "IA-32e mode guest" VM-entry
4800 * control is 1 and CS.L is 1. No check applies if the
4801 * CPU supports 64 linear-address bits. */
4802
4803 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
4804 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
4805 AssertRC(rc);
4806 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
4807 VMX_IGS_RFLAGS_RESERVED);
4808 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
4809 uint32_t const u32Eflags = u64Val;
4810
4811 if ( fLongModeGuest
4812 || ( fUnrestrictedGuest
4813 && !(u64GuestCr0 & X86_CR0_PE)))
4814 {
4815 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
4816 }
4817
4818 uint32_t u32EntryInfo;
4819 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
4820 AssertRC(rc);
4821 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
4822 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
4823
4824 /*
4825 * 64-bit checks.
4826 */
4827 if (fLongModeGuest)
4828 {
4829 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
4830 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
4831 }
4832
4833 if ( !fLongModeGuest
4834 && (u64GuestCr4 & X86_CR4_PCIDE))
4835 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
4836
4837 /** @todo CR3 field must be such that bits 63:52 and bits in the range
4838 * 51:32 beyond the processor's physical-address width are 0. */
4839
4840 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4841 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
4842 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
4843
4844#ifndef IN_NEM_DARWIN
4845 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
4846 AssertRC(rc);
4847 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
4848
4849 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
4850 AssertRC(rc);
4851 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
4852#endif
4853
4854 /*
4855 * PERF_GLOBAL MSR.
4856 */
4857 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
4858 {
4859 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
4860 AssertRC(rc);
4861 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
4862 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
4863 }
4864
4865 /*
4866 * PAT MSR.
4867 */
4868 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
4869 {
4870 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
4871 AssertRC(rc);
4872 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
4873 for (unsigned i = 0; i < 8; i++)
4874 {
4875 uint8_t u8Val = (u64Val & 0xff);
4876 if ( u8Val != 0 /* UC */
4877 && u8Val != 1 /* WC */
4878 && u8Val != 4 /* WT */
4879 && u8Val != 5 /* WP */
4880 && u8Val != 6 /* WB */
4881 && u8Val != 7 /* UC- */)
4882 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
4883 u64Val >>= 8;
4884 }
4885 }
4886
4887 /*
4888 * EFER MSR.
4889 */
4890 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
4891 {
4892 Assert(g_fHmVmxSupportsVmcsEfer);
4893 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
4894 AssertRC(rc);
4895 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
4896 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
4897 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
4898 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
4899 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
4900 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
4901 * iemVmxVmentryCheckGuestState(). */
4902 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4903 || !(u64GuestCr0 & X86_CR0_PG)
4904 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
4905 VMX_IGS_EFER_LMA_LME_MISMATCH);
4906 }
4907
4908 /*
4909 * Segment registers.
4910 */
4911 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
4912 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
4913 if (!(u32Eflags & X86_EFL_VM))
4914 {
4915 /* CS */
4916 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
4917 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
4918 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
4919 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
4920 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4921 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
4922 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4923 /* CS cannot be loaded with NULL in protected mode. */
4924 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
4925 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
4926 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
4927 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
4928 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
4929 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
4930 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
4931 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
4932 else
4933 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
4934
4935 /* SS */
4936 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4937 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
4938 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
4939 if ( !(pCtx->cr0 & X86_CR0_PE)
4940 || pCtx->cs.Attr.n.u4Type == 3)
4941 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
4942
4943 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
4944 {
4945 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
4946 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
4947 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
4948 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
4949 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4950 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4951 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
4952 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4953 }
4954
4955 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
4956 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4957 {
4958 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
4959 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
4960 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4961 || pCtx->ds.Attr.n.u4Type > 11
4962 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4963 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
4964 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
4965 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4966 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4967 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
4968 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4969 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4970 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
4971 }
4972 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4973 {
4974 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
4975 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
4976 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4977 || pCtx->es.Attr.n.u4Type > 11
4978 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4979 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
4980 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
4981 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
4982 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
4983 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
4984 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
4985 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4986 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
4987 }
4988 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
4989 {
4990 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
4991 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
4992 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4993 || pCtx->fs.Attr.n.u4Type > 11
4994 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
4995 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
4996 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
4997 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
4998 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
4999 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5000 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5001 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5002 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5003 }
5004 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5005 {
5006 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5007 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5008 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5009 || pCtx->gs.Attr.n.u4Type > 11
5010 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5011 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5012 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5013 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5014 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5015 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5016 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5017 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5018 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5019 }
5020 /* 64-bit capable CPUs. */
5021 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5022 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5023 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5024 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5025 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5026 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5027 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5028 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5029 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5030 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5031 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5032 }
5033 else
5034 {
5035 /* V86 mode checks. */
5036 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5037 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5038 {
5039 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5040 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5041 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5042 }
5043 else
5044 {
5045 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5046 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5047 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5048 }
5049
5050 /* CS */
5051 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5052 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5053 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5054 /* SS */
5055 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5056 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5057 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5058 /* DS */
5059 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5060 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5061 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5062 /* ES */
5063 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5064 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5065 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5066 /* FS */
5067 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5068 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5069 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5070 /* GS */
5071 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5072 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5073 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5074 /* 64-bit capable CPUs. */
5075 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5076 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5077 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5078 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5079 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5080 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5081 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5082 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5083 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5084 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5085 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5086 }
5087
5088 /*
5089 * TR.
5090 */
5091 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5092 /* 64-bit capable CPUs. */
5093 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5094 if (fLongModeGuest)
5095 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5096 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5097 else
5098 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5099 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS. */
5100 VMX_IGS_TR_ATTR_TYPE_INVALID);
5101 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5102 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5103 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5104 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5105 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5106 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5107 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5108 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5109
5110 /*
5111 * GDTR and IDTR (64-bit capable checks).
5112 */
5113 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5114 AssertRC(rc);
5115 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5116
5117 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5118 AssertRC(rc);
5119 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5120
5121 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5122 AssertRC(rc);
5123 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5124
5125 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5126 AssertRC(rc);
5127 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5128
5129 /*
5130 * Guest Non-Register State.
5131 */
5132 /* Activity State. */
5133 uint32_t u32ActivityState;
5134 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5135 AssertRC(rc);
5136 HMVMX_CHECK_BREAK( !u32ActivityState
5137 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5138 VMX_IGS_ACTIVITY_STATE_INVALID);
5139 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5140 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5141
5142 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5143 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5144 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5145
5146 /** @todo Activity state and injecting interrupts. Left as a todo since we
5147 * currently don't use any activity state other than ACTIVE. */
5148
5149 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5150 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5151
5152 /* Guest interruptibility-state. */
5153 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5154 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5155 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5156 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5157 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5158 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5159 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5160 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5161 {
5162 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5163 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5164 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5165 }
5166 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5167 {
5168 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5169 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5170 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5171 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5172 }
5173 /** @todo Assumes the processor is not in SMM. */
5174 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5175 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5176 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5177 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5178 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5179 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5180 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5181 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5182
5183 /* Pending debug exceptions. */
5184 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5185 AssertRC(rc);
5186 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5187 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5188 u32Val = u64Val; /* For pending debug exceptions checks below. */
5189
5190 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5191 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5192 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5193 {
5194 if ( (u32Eflags & X86_EFL_TF)
5195 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5196 {
5197 /* Bit 14 is PendingDebug.BS. */
5198 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5199 }
5200 if ( !(u32Eflags & X86_EFL_TF)
5201 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5202 {
5203 /* Bit 14 is PendingDebug.BS. */
5204 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5205 }
5206 }
5207
5208#ifndef IN_NEM_DARWIN
5209 /* VMCS link pointer. */
5210 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5211 AssertRC(rc);
5212 if (u64Val != UINT64_C(0xffffffffffffffff))
5213 {
5214 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5215 /** @todo Bits beyond the processor's physical-address width MBZ. */
5216 /** @todo SMM checks. */
5217 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5218 Assert(pVmcsInfo->pvShadowVmcs);
5219 VMXVMCSREVID VmcsRevId;
5220 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5221 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5222 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5223 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5224 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5225 }
5226
5227 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5228 * not using nested paging? */
5229 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5230 && !fLongModeGuest
5231 && CPUMIsGuestInPAEModeEx(pCtx))
5232 {
5233 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5234 AssertRC(rc);
5235 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5236
5237 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5238 AssertRC(rc);
5239 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5240
5241 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5242 AssertRC(rc);
5243 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5244
5245 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5246 AssertRC(rc);
5247 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5248 }
5249#endif
5250
5251 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5252 if (uError == VMX_IGS_ERROR)
5253 uError = VMX_IGS_REASON_NOT_FOUND;
5254 } while (0);
5255
5256 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5257 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5258 return uError;
5259
5260#undef HMVMX_ERROR_BREAK
5261#undef HMVMX_CHECK_BREAK
5262}
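
/**
 * Minimal sketch of how the guest-state checker above is meant to be consumed
 * on a VM-entry failure path.  Assumes pVCpu and pVmcsInfo are in scope; the
 * real error paths additionally record extended error information and dump
 * the VMCS.
 *
 * @code
 *     uint32_t const uDiag = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
 *     if (uDiag != VMX_IGS_REASON_NOT_FOUND)
 *         LogRel(("VM-entry failed with invalid guest state, diagnostic %#x\n", uDiag));
 *     else
 *         LogRel(("VM-entry failed but no offending guest-state field could be pinpointed\n"));
 * @endcode
 */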
5263
5264
5265#ifndef HMVMX_USE_FUNCTION_TABLE
5266/**
5267 * Handles a guest VM-exit from hardware-assisted VMX execution.
5268 *
5269 * @returns Strict VBox status code (i.e. informational status codes too).
5270 * @param pVCpu The cross context virtual CPU structure.
5271 * @param pVmxTransient The VMX-transient structure.
5272 */
5273DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5274{
5275#ifdef DEBUG_ramshankar
5276# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5277 do { \
5278 if (a_fSave != 0) \
5279 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); \
5280 VBOXSTRICTRC rcStrict = a_CallExpr; \
5281 if (a_fSave != 0) \
5282 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5283 return rcStrict; \
5284 } while (0)
5285#else
5286# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5287#endif
5288 uint32_t const uExitReason = pVmxTransient->uExitReason;
5289 switch (uExitReason)
5290 {
5291 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5292 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5293 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5294 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5295 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5296 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5297 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5298 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5299 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5300 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5301 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5302 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5303 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5304 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5305 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5306 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5307 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5308 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5309 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5310 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5311 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5312 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5313 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5314 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5315 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5316 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5317 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5318 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5319 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5320 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5321#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5322 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5323 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5324 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5325 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5326 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5327 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5328 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5329 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5330 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5331 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5332#else
5333 case VMX_EXIT_VMCLEAR:
5334 case VMX_EXIT_VMLAUNCH:
5335 case VMX_EXIT_VMPTRLD:
5336 case VMX_EXIT_VMPTRST:
5337 case VMX_EXIT_VMREAD:
5338 case VMX_EXIT_VMRESUME:
5339 case VMX_EXIT_VMWRITE:
5340 case VMX_EXIT_VMXOFF:
5341 case VMX_EXIT_VMXON:
5342 case VMX_EXIT_INVVPID:
5343 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5344#endif
5345#if defined(VBOX_WITH_NESTED_HWVIRT_VMX) && defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT)
5346 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5347#else
5348 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5349#endif
5350
5351 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5352 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5353 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5354
5355 case VMX_EXIT_INIT_SIGNAL:
5356 case VMX_EXIT_SIPI:
5357 case VMX_EXIT_IO_SMI:
5358 case VMX_EXIT_SMI:
5359 case VMX_EXIT_ERR_MSR_LOAD:
5360 case VMX_EXIT_ERR_MACHINE_CHECK:
5361 case VMX_EXIT_PML_FULL:
5362 case VMX_EXIT_VIRTUALIZED_EOI:
5363 case VMX_EXIT_GDTR_IDTR_ACCESS:
5364 case VMX_EXIT_LDTR_TR_ACCESS:
5365 case VMX_EXIT_APIC_WRITE:
5366 case VMX_EXIT_RDRAND:
5367 case VMX_EXIT_RSM:
5368 case VMX_EXIT_VMFUNC:
5369 case VMX_EXIT_ENCLS:
5370 case VMX_EXIT_RDSEED:
5371 case VMX_EXIT_XSAVES:
5372 case VMX_EXIT_XRSTORS:
5373 case VMX_EXIT_UMWAIT:
5374 case VMX_EXIT_TPAUSE:
5375 case VMX_EXIT_LOADIWKEY:
5376 default:
5377 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5378 }
5379#undef VMEXIT_CALL_RET
5380}
5381#endif /* !HMVMX_USE_FUNCTION_TABLE */
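
/**
 * Minimal caller-side sketch of the switch-based dispatcher above.  It is only
 * compiled when HMVMX_USE_FUNCTION_TABLE is not defined (the table-based
 * dispatch is used otherwise); the local VmxTransient variable and the
 * surrounding run loop are assumed here.
 *
 * @code
 *     // After a successful hardware round trip, uExitReason has been read
 *     // from the VMCS into the transient structure.
 *     VBOXSTRICTRC rcStrict = vmxHCHandleExit(pVCpu, &VmxTransient);
 *     if (rcStrict != VINF_SUCCESS)
 *         break;   // informational status codes bubble up towards ring-3
 * @endcode
 */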
5382
5383
5384#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5385/**
5386 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5387 *
5388 * @returns Strict VBox status code (i.e. informational status codes too).
5389 * @param pVCpu The cross context virtual CPU structure.
5390 * @param pVmxTransient The VMX-transient structure.
5391 */
5392DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5393{
5394 uint32_t const uExitReason = pVmxTransient->uExitReason;
5395 switch (uExitReason)
5396 {
5397# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5398 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient);
5399 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolationNested(pVCpu, pVmxTransient);
5400# else
5401 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
5402 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
5403# endif
5404 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5405 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5406 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5407
5408 /*
5409 * We shouldn't direct host physical interrupts to the nested-guest.
5410 */
5411 case VMX_EXIT_EXT_INT:
5412 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5413
5414 /*
5415 * Instructions that cause VM-exits unconditionally or whose exit condition is
5416 * taken solely from the nested hypervisor (meaning if the VM-exit
5417 * happens, it's guaranteed to be a nested-guest VM-exit).
5418 *
5419 * - Provides VM-exit instruction length ONLY.
5420 */
5421 case VMX_EXIT_CPUID: /* Unconditional. */
5422 case VMX_EXIT_VMCALL:
5423 case VMX_EXIT_GETSEC:
5424 case VMX_EXIT_INVD:
5425 case VMX_EXIT_XSETBV:
5426 case VMX_EXIT_VMLAUNCH:
5427 case VMX_EXIT_VMRESUME:
5428 case VMX_EXIT_VMXOFF:
5429 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5430 case VMX_EXIT_VMFUNC:
5431 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5432
5433 /*
5434 * Instructions that cause VM-exits unconditionally or whose exit condition is
5435 * taken solely from the nested hypervisor (meaning if the VM-exit
5436 * happens, it's guaranteed to be a nested-guest VM-exit).
5437 *
5438 * - Provides VM-exit instruction length.
5439 * - Provides VM-exit information.
5440 * - Optionally provides Exit qualification.
5441 *
5442 * Since Exit qualification is 0 for all VM-exits where it is not
5443 * applicable, reading and passing it to the guest should produce
5444 * defined behavior.
5445 *
5446 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5447 */
5448 case VMX_EXIT_INVEPT: /* Unconditional. */
5449 case VMX_EXIT_INVVPID:
5450 case VMX_EXIT_VMCLEAR:
5451 case VMX_EXIT_VMPTRLD:
5452 case VMX_EXIT_VMPTRST:
5453 case VMX_EXIT_VMXON:
5454 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5455 case VMX_EXIT_LDTR_TR_ACCESS:
5456 case VMX_EXIT_RDRAND:
5457 case VMX_EXIT_RDSEED:
5458 case VMX_EXIT_XSAVES:
5459 case VMX_EXIT_XRSTORS:
5460 case VMX_EXIT_UMWAIT:
5461 case VMX_EXIT_TPAUSE:
5462 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5463
5464 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5465 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5466 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5467 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5468 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
5469 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
5470 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
5471 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
5472 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
5473 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
5474 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
5475 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
5476 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
5477 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
5478 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
5479 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
5480 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
5481 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
5482 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
5483
5484 case VMX_EXIT_PREEMPT_TIMER:
5485 {
5486 /** @todo NSTVMX: Preempt timer. */
5487 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
5488 }
5489
5490 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
5491 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
5492
5493 case VMX_EXIT_VMREAD:
5494 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
5495
5496 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
5497 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
5498
5499 case VMX_EXIT_INIT_SIGNAL:
5500 case VMX_EXIT_SIPI:
5501 case VMX_EXIT_IO_SMI:
5502 case VMX_EXIT_SMI:
5503 case VMX_EXIT_ERR_MSR_LOAD:
5504 case VMX_EXIT_ERR_MACHINE_CHECK:
5505 case VMX_EXIT_PML_FULL:
5506 case VMX_EXIT_RSM:
5507 default:
5508 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5509 }
5510}
5511#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5512
5513
5514/** @name VM-exit helpers.
5515 * @{
5516 */
5517/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5518/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
5519/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5520
5521/** Macro for VM-exits called unexpectedly. */
5522#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
5523 do { \
5524 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
5525 return VERR_VMX_UNEXPECTED_EXIT; \
5526 } while (0)
5527
5528#ifdef VBOX_STRICT
5529# ifndef IN_NEM_DARWIN
5530/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
5531# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
5532 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
5533
5534# define HMVMX_ASSERT_PREEMPT_CPUID() \
5535 do { \
5536 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
5537 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
5538 } while (0)
5539
5540# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5541 do { \
5542 AssertPtr((a_pVCpu)); \
5543 AssertPtr((a_pVmxTransient)); \
5544 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
5545 Assert((a_pVmxTransient)->pVmcsInfo); \
5546 Assert(ASMIntAreEnabled()); \
5547 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5548 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
5549 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5550 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5551 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
5552 HMVMX_ASSERT_PREEMPT_CPUID(); \
5553 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5554 } while (0)
5555# else
5556# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
5557# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
5558# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5559 do { \
5560 AssertPtr((a_pVCpu)); \
5561 AssertPtr((a_pVmxTransient)); \
5562 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
5563 Assert((a_pVmxTransient)->pVmcsInfo); \
5564 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5565 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5566 } while (0)
5567# endif
5568
5569# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5570 do { \
5571 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
5572 Assert((a_pVmxTransient)->fIsNestedGuest); \
5573 } while (0)
5574
5575# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5576 do { \
5577 Log4Func(("\n")); \
5578 } while (0)
5579#else
5580# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5581 do { \
5582 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5583 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
5584 } while (0)
5585
5586# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5587 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
5588
5589# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
5590#endif
5591
5592#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5593/** Macro that performs the necessary privilege checks and handles intercepted VM-exits for
5594 * guests that attempted to execute a VMX instruction. */
5595# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
5596 do \
5597 { \
5598 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
5599 if (rcStrictTmp == VINF_SUCCESS) \
5600 { /* likely */ } \
5601 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5602 { \
5603 Assert((a_pVCpu)->hm.s.Event.fPending); \
5604 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
5605 return VINF_SUCCESS; \
5606 } \
5607 else \
5608 { \
5609 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
5610 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
5611 } \
5612 } while (0)
5613
5614/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
5615# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
5616 do \
5617 { \
5618 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
5619 (a_pGCPtrEffAddr)); \
5620 if (rcStrictTmp == VINF_SUCCESS) \
5621 { /* likely */ } \
5622 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5623 { \
5624 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
5625 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
5626 NOREF(uXcptTmp); \
5627 return VINF_SUCCESS; \
5628 } \
5629 else \
5630 { \
5631 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
5632 return rcStrictTmp; \
5633 } \
5634 } while (0)
5635#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
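
/**
 * Prologue sketch for a hypothetical VMX-instruction VM-exit handler built on
 * the macros above.  The handler name is illustrative only; the real handlers
 * additionally read the exit qualification, instruction information and
 * instruction length before handing the instruction to IEM.
 *
 * @code
 *     static VBOXSTRICTRC vmxHCExitSomeVmxInstrSketch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
 *     {
 *         HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
 *         HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
 *         // ... decode any memory operand with HMVMX_DECODE_MEM_OPERAND and
 *         // emulate the instruction via IEM ...
 *         return VINF_SUCCESS;
 *     }
 * @endcode
 */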
5636
5637
5638/**
5639 * Advances the guest RIP by the specified number of bytes.
5640 *
5641 * @param pVCpu The cross context virtual CPU structure.
5642 * @param cbInstr Number of bytes to advance the RIP by.
5643 *
5644 * @remarks No-long-jump zone!!!
5645 */
5646DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
5647{
5648 /* Advance the RIP. */
5649 pVCpu->cpum.GstCtx.rip += cbInstr;
5650 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
5651
5652 /* Update interrupt inhibition. */
5653 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
5654 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
5655 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
5656}
5657
5658
5659/**
5660 * Advances the guest RIP after reading it from the VMCS.
5661 *
5662 * @returns VBox status code, no informational status codes.
5663 * @param pVCpu The cross context virtual CPU structure.
5664 * @param pVmxTransient The VMX-transient structure.
5665 *
5666 * @remarks No-long-jump zone!!!
5667 */
5668static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5669{
5670 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
5671 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
5672 AssertRCReturn(rc, rc);
5673
5674 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
5675 return VINF_SUCCESS;
5676}
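
/**
 * Minimal sketch of the common "emulate locally, then skip the instruction"
 * handler pattern built on the helper above (hypothetical handler; real
 * handlers of this shape also update statistics and perform extra checks).
 *
 * @code
 *     static VBOXSTRICTRC vmxHCExitTrivialInstrSketch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
 *     {
 *         HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
 *         int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);  // reads instr. length, RIP and RFLAGS
 *         AssertRCReturn(rc, rc);
 *         return VINF_SUCCESS;
 *     }
 * @endcode
 */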
5677
5678
5679/**
5680 * Handles a condition that occurred while delivering an event through the guest or
5681 * nested-guest IDT.
5682 *
5683 * @returns Strict VBox status code (i.e. informational status codes too).
5684 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5685 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
5686 * to continue execution of the guest which will deliver the \#DF.
5687 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5688 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
5689 *
5690 * @param pVCpu The cross context virtual CPU structure.
5691 * @param pVmxTransient The VMX-transient structure.
5692 *
5693 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
5694 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
5695 * is due to an EPT violation, PML full or SPP-related event.
5696 *
5697 * @remarks No-long-jump zone!!!
5698 */
5699static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5700{
5701 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
5702 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
5703 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5704 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5705 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5706 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
5707
5708 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5709 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5710 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
5711 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
5712 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
5713 {
5714 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
5715 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
5716
5717 /*
5718 * If the event was a software interrupt (generated with INT n) or a software exception
5719 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
5720 * can handle the VM-exit and continue guest execution, which re-executes the
5721 * instruction rather than re-injecting the exception. Re-injection can cause premature
5722 * trips to ring-3 before injection and involves TRPM, which currently has no way of
5723 * recording that these exceptions were caused by these instructions (ICEBP's #DB poses
5724 * the problem).
5725 */
5726 IEMXCPTRAISE enmRaise;
5727 IEMXCPTRAISEINFO fRaiseInfo;
5728 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5729 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5730 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
5731 {
5732 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
5733 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5734 }
5735 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
5736 {
5737 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
5738 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
5739 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
5740
5741 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
5742 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
5743
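            /* Ask IEM how the original (IDT-vectoring) event and the new exception interact:
               re-inject the previous event, deliver the current exception, convert to a #DF,
               signal a triple fault, or flag a CPU hang (see the switch further below). */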
5744 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
5745
5746 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
5747 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
5748 {
5749 pVmxTransient->fVectoringPF = true;
5750 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5751 }
5752 }
5753 else
5754 {
5755 /*
5756 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
5757 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
5758 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
5759 */
5760 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5761 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5762 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
5763 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5764 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5765 }
5766
5767 /*
5768 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
5769 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
5770 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
5771 * subsequent VM-entry would fail, see @bugref{7445}.
5772 *
5773 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
5774 */
5775 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5776 && enmRaise == IEMXCPTRAISE_PREV_EVENT
5777 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5778 && CPUMIsGuestNmiBlocking(pVCpu))
5779 {
5780 CPUMSetGuestNmiBlocking(pVCpu, false);
5781 }
5782
5783 switch (enmRaise)
5784 {
5785 case IEMXCPTRAISE_CURRENT_XCPT:
5786 {
5787 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
5788 Assert(rcStrict == VINF_SUCCESS);
5789 break;
5790 }
5791
5792 case IEMXCPTRAISE_PREV_EVENT:
5793 {
5794 uint32_t u32ErrCode;
5795 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
5796 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5797 else
5798 u32ErrCode = 0;
5799
5800 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
5801 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
5802 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */,
5803 u32ErrCode, pVCpu->cpum.GstCtx.cr2);
5804
5805 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5806 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
5807 Assert(rcStrict == VINF_SUCCESS);
5808 break;
5809 }
5810
5811 case IEMXCPTRAISE_REEXEC_INSTR:
5812 Assert(rcStrict == VINF_SUCCESS);
5813 break;
5814
5815 case IEMXCPTRAISE_DOUBLE_FAULT:
5816 {
5817 /*
5818 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
5819 * second #PF as a guest #PF (and not a shadow #PF) that needs to be converted into a #DF.
5820 */
5821 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
5822 {
5823 pVmxTransient->fVectoringDoublePF = true;
5824 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5825 pVCpu->cpum.GstCtx.cr2));
5826 rcStrict = VINF_SUCCESS;
5827 }
5828 else
5829 {
5830 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
5831 vmxHCSetPendingXcptDF(pVCpu);
5832 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5833 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5834 rcStrict = VINF_HM_DOUBLE_FAULT;
5835 }
5836 break;
5837 }
5838
5839 case IEMXCPTRAISE_TRIPLE_FAULT:
5840 {
5841 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
5842 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5843 rcStrict = VINF_EM_RESET;
5844 break;
5845 }
5846
5847 case IEMXCPTRAISE_CPU_HANG:
5848 {
5849 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
5850 rcStrict = VERR_EM_GUEST_CPU_HANG;
5851 break;
5852 }
5853
5854 default:
5855 {
5856 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
5857 rcStrict = VERR_VMX_IPE_2;
5858 break;
5859 }
5860 }
5861 }
5862 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5863 && !CPUMIsGuestNmiBlocking(pVCpu))
5864 {
5865 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
5866 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
5867 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
5868 {
5869 /*
5870 * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
5871 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5872 * that virtual NMIs remain blocked until the IRET execution is completed.
5873 *
5874 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
5875 */
5876 CPUMSetGuestNmiBlocking(pVCpu, true);
5877 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5878 }
5879 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5880 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5881 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5882 {
5883 /*
5884 * Execution of IRET caused an EPT violation, page-modification log-full event or
5885 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
5886 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5887 * that virtual NMIs remain blocked until the IRET execution is completed.
5888 *
5889 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
5890 */
5891 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
5892 {
5893 CPUMSetGuestNmiBlocking(pVCpu, true);
5894 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5895 }
5896 }
5897 }
5898
5899 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
5900 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
5901 return rcStrict;
5902}
5903
5904
5905#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5906/**
5907 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
5908 * guest attempting to execute a VMX instruction.
5909 *
5910 * @returns Strict VBox status code (i.e. informational status codes too).
5911 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5912 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
5913 *
5914 * @param pVCpu The cross context virtual CPU structure.
5915 * @param uExitReason The VM-exit reason.
5916 *
5917 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
5918 * @remarks No-long-jump zone!!!
5919 */
5920static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
5921{
5922 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
5923 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
5924
5925 /*
5926 * The physical CPU would have already checked the CPU mode/code segment.
5927 * We shall just assert here for paranoia.
5928 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
5929 */
5930 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
5931 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
5932 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
5933
5934 if (uExitReason == VMX_EXIT_VMXON)
5935 {
5936 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5937
5938 /*
5939 * We check CR4.VMXE because it is required to be always set while in VMX operation
5940 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
5941 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
5942 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
5943 */
5944 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
5945 {
5946 Log4Func(("CR4.VMXE is not set -> #UD\n"));
5947 vmxHCSetPendingXcptUD(pVCpu);
5948 return VINF_HM_PENDING_XCPT;
5949 }
5950 }
5951 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
5952 {
5953 /*
5954 * The guest has not entered VMX operation but attempted to execute a VMX instruction
5955 * (other than VMXON), so we need to raise a #UD.
5956 */
5957 Log4Func(("Not in VMX root mode -> #UD\n"));
5958 vmxHCSetPendingXcptUD(pVCpu);
5959 return VINF_HM_PENDING_XCPT;
5960 }
5961
5962 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
5963 return VINF_SUCCESS;
5964}
5965
5966
5967/**
5968 * Decodes the memory operand of an instruction that caused a VM-exit.
5969 *
5970 * The Exit qualification field provides the displacement field for memory
5971 * operand instructions, if any.
5972 *
5973 * @returns Strict VBox status code (i.e. informational status codes too).
5974 * @retval VINF_SUCCESS if the operand was successfully decoded.
5975 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
5976 * operand.
5977 * @param pVCpu The cross context virtual CPU structure.
5978 * @param uExitInstrInfo The VM-exit instruction information field.
5979 * @param enmMemAccess The memory operand's access type (read or write).
5980 * @param GCPtrDisp The instruction displacement field, if any. For
5981 * RIP-relative addressing pass RIP + displacement here.
5982 * @param pGCPtrMem Where to store the effective destination memory address.
5983 *
5984 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
5985 * virtual-8086 mode, hence it skips those checks when verifying whether the
5986 * segment is valid.
5987 */
5988static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
5989 PRTGCPTR pGCPtrMem)
5990{
5991 Assert(pGCPtrMem);
5992 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
5993 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
5994 | CPUMCTX_EXTRN_CR0);
5995
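    /* Lookup tables indexed by the instruction-information address-size field:
       0 = 16-bit, 1 = 32-bit, 2 = 64-bit. */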
5996 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
5997 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
5998 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
5999
6000 VMXEXITINSTRINFO ExitInstrInfo;
6001 ExitInstrInfo.u = uExitInstrInfo;
6002 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6003 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6004 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6005 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6006 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6007 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6008 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6009 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6010 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6011
6012 /*
6013 * Validate instruction information.
6014 * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6015 */
6016 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6017 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6018 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6019 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6020 AssertLogRelMsgReturn(fIsMemOperand,
6021 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6022
6023 /*
6024 * Compute the complete effective address.
6025 *
6026 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6027 * See AMD spec. 4.5.2 "Segment Registers".
6028 */
6029 RTGCPTR GCPtrMem = GCPtrDisp;
6030 if (fBaseRegValid)
6031 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6032 if (fIdxRegValid)
6033 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6034
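    /* Remember the segment-relative offset for the limit checks below; in long mode only the
       FS and GS segment bases are applied to the effective address. */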
6035 RTGCPTR const GCPtrOff = GCPtrMem;
6036 if ( !fIsLongMode
6037 || iSegReg >= X86_SREG_FS)
6038 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6039 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6040
6041 /*
6042 * Validate effective address.
6043 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6044 */
6045 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6046 Assert(cbAccess > 0);
6047 if (fIsLongMode)
6048 {
6049 if (X86_IS_CANONICAL(GCPtrMem))
6050 {
6051 *pGCPtrMem = GCPtrMem;
6052 return VINF_SUCCESS;
6053 }
6054
6055 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6056 * "Data Limit Checks in 64-bit Mode". */
6057 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6058 vmxHCSetPendingXcptGP(pVCpu, 0);
6059 return VINF_HM_PENDING_XCPT;
6060 }
6061
6062 /*
6063 * This is a watered down version of iemMemApplySegment().
6064 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6065 * and segment CPL/DPL checks are skipped.
6066 */
6067 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6068 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6069 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6070
6071 /* Check if the segment is present and usable. */
6072 if ( pSel->Attr.n.u1Present
6073 && !pSel->Attr.n.u1Unusable)
6074 {
6075 Assert(pSel->Attr.n.u1DescType);
6076 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6077 {
6078 /* Check permissions for the data segment. */
6079 if ( enmMemAccess == VMXMEMACCESS_WRITE
6080 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6081 {
6082 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6083 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6084 return VINF_HM_PENDING_XCPT;
6085 }
6086
6087 /* Check limits if it's a normal data segment. */
6088 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6089 {
6090 if ( GCPtrFirst32 > pSel->u32Limit
6091 || GCPtrLast32 > pSel->u32Limit)
6092 {
6093 Log4Func(("Data segment limit exceeded. "
6094 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6095 GCPtrLast32, pSel->u32Limit));
6096 if (iSegReg == X86_SREG_SS)
6097 vmxHCSetPendingXcptSS(pVCpu, 0);
6098 else
6099 vmxHCSetPendingXcptGP(pVCpu, 0);
6100 return VINF_HM_PENDING_XCPT;
6101 }
6102 }
6103 else
6104 {
6105 /* Check limits if it's an expand-down data segment.
6106 Note! The upper boundary is defined by the B bit, not the G bit! */
6107 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6108 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6109 {
6110 Log4Func(("Expand-down data segment limit exceeded. "
6111 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6112 GCPtrLast32, pSel->u32Limit));
6113 if (iSegReg == X86_SREG_SS)
6114 vmxHCSetPendingXcptSS(pVCpu, 0);
6115 else
6116 vmxHCSetPendingXcptGP(pVCpu, 0);
6117 return VINF_HM_PENDING_XCPT;
6118 }
6119 }
6120 }
6121 else
6122 {
6123 /* Check permissions for the code segment. */
6124 if ( enmMemAccess == VMXMEMACCESS_WRITE
6125 || ( enmMemAccess == VMXMEMACCESS_READ
6126 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6127 {
6128 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6129 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6130 vmxHCSetPendingXcptGP(pVCpu, 0);
6131 return VINF_HM_PENDING_XCPT;
6132 }
6133
6134 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6135 if ( GCPtrFirst32 > pSel->u32Limit
6136 || GCPtrLast32 > pSel->u32Limit)
6137 {
6138 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6139 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6140 if (iSegReg == X86_SREG_SS)
6141 vmxHCSetPendingXcptSS(pVCpu, 0);
6142 else
6143 vmxHCSetPendingXcptGP(pVCpu, 0);
6144 return VINF_HM_PENDING_XCPT;
6145 }
6146 }
6147 }
6148 else
6149 {
6150 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6151 vmxHCSetPendingXcptGP(pVCpu, 0);
6152 return VINF_HM_PENDING_XCPT;
6153 }
6154
6155 *pGCPtrMem = GCPtrMem;
6156 return VINF_SUCCESS;
6157}
6158#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6159
6160
6161/**
6162 * VM-exit helper for LMSW.
6163 */
6164static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6165{
6166 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6167 AssertRCReturn(rc, rc);
6168
6169 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6170 AssertMsg( rcStrict == VINF_SUCCESS
6171 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6172
6173 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
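    /* If IEM raised an exception it is now pending for injection; flag the context bits an
       exception delivery may modify and continue with success. */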
6174 if (rcStrict == VINF_IEM_RAISED_XCPT)
6175 {
6176 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6177 rcStrict = VINF_SUCCESS;
6178 }
6179
6180 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6181 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6182 return rcStrict;
6183}
6184
6185
6186/**
6187 * VM-exit helper for CLTS.
6188 */
6189static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6190{
6191 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6192 AssertRCReturn(rc, rc);
6193
6194 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6195 AssertMsg( rcStrict == VINF_SUCCESS
6196 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6197
6198 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6199 if (rcStrict == VINF_IEM_RAISED_XCPT)
6200 {
6201 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6202 rcStrict = VINF_SUCCESS;
6203 }
6204
6205 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6206 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6207 return rcStrict;
6208}
6209
6210
6211/**
6212 * VM-exit helper for MOV from CRx (CRx read).
6213 */
6214static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6215{
6216 Assert(iCrReg < 16);
6217 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6218
6219 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6220 AssertRCReturn(rc, rc);
6221
6222 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6223 AssertMsg( rcStrict == VINF_SUCCESS
6224 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6225
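    /* If the destination GPR is RSP, the stack pointer must be flagged as changed as well. */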
6226 if (iGReg == X86_GREG_xSP)
6227 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6228 else
6229 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6230#ifdef VBOX_WITH_STATISTICS
6231 switch (iCrReg)
6232 {
6233 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6234 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6235 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6236 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6237 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6238 }
6239#endif
6240 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6241 return rcStrict;
6242}
6243
6244
6245/**
6246 * VM-exit helper for MOV to CRx (CRx write).
6247 */
6248static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6249{
6250 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6251
6252 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6253 AssertMsg( rcStrict == VINF_SUCCESS
6254 || rcStrict == VINF_IEM_RAISED_XCPT
6255 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6256
6257 switch (iCrReg)
6258 {
6259 case 0:
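            /* A CR0 write can toggle paging, which in turn can change EFER.LMA and the
               VM-entry/exit controls, hence the extra change flags here. */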
6260 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6261 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6262 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6263 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6264 break;
6265
6266 case 2:
6267 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6268 /* Nothing to do here; CR2 is not part of the VMCS. */
6269 break;
6270
6271 case 3:
6272 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6273 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6274 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6275 break;
6276
6277 case 4:
6278 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6279 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6280#ifndef IN_NEM_DARWIN
6281 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6282 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6283#else
6284 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6285#endif
6286 break;
6287
6288 case 8:
6289 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6290 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6291 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6292 break;
6293
6294 default:
6295 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6296 break;
6297 }
6298
6299 if (rcStrict == VINF_IEM_RAISED_XCPT)
6300 {
6301 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6302 rcStrict = VINF_SUCCESS;
6303 }
6304 return rcStrict;
6305}
6306
6307
6308/**
6309 * VM-exit exception handler for \#PF (Page-fault exception).
6310 *
6311 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6312 */
6313static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6314{
6315 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6316 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6317
6318#ifndef IN_NEM_DARWIN
6319 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6320 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6321 { /* likely */ }
6322 else
6323#endif
6324 {
6325#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6326 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6327#endif
6328 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6329 if (!pVmxTransient->fVectoringDoublePF)
6330 {
6331 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6332 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6333 }
6334 else
6335 {
6336 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6337 Assert(!pVmxTransient->fIsNestedGuest);
6338 vmxHCSetPendingXcptDF(pVCpu);
6339 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6340 }
6341 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6342 return VINF_SUCCESS;
6343 }
6344
6345 Assert(!pVmxTransient->fIsNestedGuest);
6346
6347 /* If it's a vectoring #PF, emulate injecting the original event, as PGMTrap0eHandler() is incapable
6348 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6349 if (pVmxTransient->fVectoringPF)
6350 {
6351 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6352 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6353 }
6354
6355 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6356 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6357 AssertRCReturn(rc, rc);
6358
6359 Log4Func(("#PF: cs:rip=%#04x:%#RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pCtx->cs.Sel, pCtx->rip,
6360 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pCtx->cr3));
6361
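    /* Hand the fault to PGM; it distinguishes faults we must handle ourselves (shadow page
       table syncs, MMIO) from genuine guest #PFs that need reflecting to the guest. */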
6362 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6363 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
6364
6365 Log4Func(("#PF: rc=%Rrc\n", rc));
6366 if (rc == VINF_SUCCESS)
6367 {
6368 /*
6369 * This is typically a shadow page table sync or a MMIO instruction. But we may have
6370 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6371 */
6372 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6373 TRPMResetTrap(pVCpu);
6374 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6375 return rc;
6376 }
6377
6378 if (rc == VINF_EM_RAW_GUEST_TRAP)
6379 {
6380 if (!pVmxTransient->fVectoringDoublePF)
6381 {
6382 /* It's a guest page fault and needs to be reflected to the guest. */
6383 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6384 TRPMResetTrap(pVCpu);
6385 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6386 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6387 uGstErrorCode, pVmxTransient->uExitQual);
6388 }
6389 else
6390 {
6391 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6392 TRPMResetTrap(pVCpu);
6393 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6394 vmxHCSetPendingXcptDF(pVCpu);
6395 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6396 }
6397
6398 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6399 return VINF_SUCCESS;
6400 }
6401
6402 TRPMResetTrap(pVCpu);
6403 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6404 return rc;
6405}
6406
6407
6408/**
6409 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6410 *
6411 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6412 */
6413static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6414{
6415 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6416 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6417
6418 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0);
6419 AssertRCReturn(rc, rc);
6420
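    /* With CR0.NE clear the guest expects legacy external FPU error reporting
       (FERR# -> IRQ 13) rather than a #MF exception. */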
6421 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6422 {
6423 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
6424 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6425
6426 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6427 * provides the VM-exit instruction length. If this causes a problem later,
6428 * disassemble the instruction like it's done on AMD-V. */
6429 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6430 AssertRCReturn(rc2, rc2);
6431 return rc;
6432 }
6433
6434 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6435 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6436 return VINF_SUCCESS;
6437}
6438
6439
6440/**
6441 * VM-exit exception handler for \#BP (Breakpoint exception).
6442 *
6443 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6444 */
6445static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6446{
6447 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6448 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6449
6450 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6451 AssertRCReturn(rc, rc);
6452
6453 VBOXSTRICTRC rcStrict;
6454 if (!pVmxTransient->fIsNestedGuest)
6455 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx));
6456 else
6457 rcStrict = VINF_EM_RAW_GUEST_TRAP;
6458
6459 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6460 {
6461 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6462 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6463 rcStrict = VINF_SUCCESS;
6464 }
6465
6466 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
6467 return rcStrict;
6468}
6469
6470
6471/**
6472 * VM-exit exception handler for \#AC (Alignment-check exception).
6473 *
6474 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6475 */
6476static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6477{
6478 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6479
6480 /*
6481 * Detect #ACs caused by the host having enabled split-lock detection.
6482 * Emulate such instructions.
6483 */
6484 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo,
6485 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);
6486 AssertRCReturn(rc, rc);
6487 /** @todo detect split lock in cpu feature? */
6488 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
6489 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6490 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
6491 || CPUMGetGuestCPL(pVCpu) != 3
6492 /* 3. When the EFLAGS.AC != 0 this can only be a split-lock case. */
6493 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
6494 {
6495 /*
6496 * Check for debug/trace events and import state accordingly.
6497 */
6498 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
6499 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6500 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
6501#ifndef IN_NEM_DARWIN
6502 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
6503#endif
6504 )
6505 {
6506 if (pVM->cCpus == 1)
6507 {
6508#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6509 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6510#else
6511 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6512#endif
6513 AssertRCReturn(rc, rc);
6514 }
6515 }
6516 else
6517 {
6518 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6519 AssertRCReturn(rc, rc);
6520
6521 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
6522
6523 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
6524 {
6525 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
6526 if (rcStrict != VINF_SUCCESS)
6527 return rcStrict;
6528 }
6529 }
6530
6531 /*
6532 * Emulate the instruction.
6533 *
6534 * We have to ignore the LOCK prefix here as we must not retrigger the
6535 * detection on the host. This isn't all that satisfactory, though...
6536 */
6537 if (pVM->cCpus == 1)
6538 {
6539 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
6540 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6541
6542 /** @todo For SMP configs we should do a rendezvous here. */
6543 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
6544 if (rcStrict == VINF_SUCCESS)
6545#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6546 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6547 HM_CHANGED_GUEST_RIP
6548 | HM_CHANGED_GUEST_RFLAGS
6549 | HM_CHANGED_GUEST_GPRS_MASK
6550 | HM_CHANGED_GUEST_CS
6551 | HM_CHANGED_GUEST_SS);
6552#else
6553 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6554#endif
6555 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6556 {
6557 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6558 rcStrict = VINF_SUCCESS;
6559 }
6560 return rcStrict;
6561 }
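    /* For SMP configs let ring-3 deal with it (VINF_EM_EMULATE_SPLIT_LOCK); emulating here
       without coordinating the other VCPUs (see the rendezvous @todo above) would not be safe. */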
6562 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
6563 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6564 return VINF_EM_EMULATE_SPLIT_LOCK;
6565 }
6566
6567 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
6568 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
6569 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
6570
6571 /* Re-inject it. We'll detect any nesting before getting here. */
6572 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6573 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6574 return VINF_SUCCESS;
6575}
6576
6577
6578/**
6579 * VM-exit exception handler for \#DB (Debug exception).
6580 *
6581 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6582 */
6583static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6584{
6585 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6586 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
6587
6588 /*
6589 * Get the DR6-like values from the Exit qualification and pass it to DBGF for processing.
6590 */
6591 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6592
6593 /* See Intel spec. Table 27-1, "Exit Qualifications for debug exceptions", for the format. */
6594 uint64_t const uDR6 = X86_DR6_INIT_VAL
6595 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
6596 | X86_DR6_BD | X86_DR6_BS));
6597
6598 int rc;
6599 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6600 if (!pVmxTransient->fIsNestedGuest)
6601 {
6602 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6603
6604 /*
6605 * Prevents stepping twice over the same instruction when the guest is stepping using
6606 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
6607 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
6608 */
6609 if ( rc == VINF_EM_DBG_STEPPED
6610 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
6611 {
6612 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6613 rc = VINF_EM_RAW_GUEST_TRAP;
6614 }
6615 }
6616 else
6617 rc = VINF_EM_RAW_GUEST_TRAP;
6618 Log6Func(("rc=%Rrc\n", rc));
6619 if (rc == VINF_EM_RAW_GUEST_TRAP)
6620 {
6621 /*
6622 * The exception was for the guest. Update DR6, DR7.GD and
6623 * IA32_DEBUGCTL.LBR before forwarding it.
6624 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
6625 */
6626#ifndef IN_NEM_DARWIN
6627 VMMRZCallRing3Disable(pVCpu);
6628 HM_DISABLE_PREEMPT(pVCpu);
6629
6630 pCtx->dr[6] &= ~X86_DR6_B_MASK;
6631 pCtx->dr[6] |= uDR6;
6632 if (CPUMIsGuestDebugStateActive(pVCpu))
6633 ASMSetDR6(pCtx->dr[6]);
6634
6635 HM_RESTORE_PREEMPT();
6636 VMMRZCallRing3Enable(pVCpu);
6637#else
6638 /** @todo */
6639#endif
6640
6641 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7);
6642 AssertRCReturn(rc, rc);
6643
6644 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
6645 pCtx->dr[7] &= ~(uint64_t)X86_DR7_GD;
6646
6647 /* Paranoia. */
6648 pCtx->dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
6649 pCtx->dr[7] |= X86_DR7_RA1_MASK;
6650
6651 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
6652 AssertRC(rc);
6653
6654 /*
6655 * Raise #DB in the guest.
6656 *
6657 * It is important to reflect exactly what the VM-exit gave us (preserving the
6658 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
6659 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
6660 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
6661 *
6662 * Intel re-documented ICEBP/INT1 in May 2018 (previously it was only documented as part of
6663 * the Intel 386); see Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
6664 */
6665 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6666 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6667 return VINF_SUCCESS;
6668 }
6669
6670 /*
6671 * Not a guest trap, so it must be a hypervisor-related debug event.
6672 * Update DR6 in case someone is interested in it.
6673 */
6674 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
6675 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
6676 CPUMSetHyperDR6(pVCpu, uDR6);
6677
6678 return rc;
6679}
6680
6681
6682/**
6683 * Hacks its way around the lovely mesa driver's backdoor accesses.
6684 *
6685 * @sa hmR0SvmHandleMesaDrvGp.
6686 */
6687static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6688{
6689 LogFunc(("cs:rip=%#04x:%#RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
6690 RT_NOREF(pCtx);
6691
6692 /* For now we'll just skip the instruction. */
6693 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6694}
6695
6696
6697/**
6698 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
6699 * backdoor logging w/o checking what it is running inside.
6700 *
6701 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
6702 * backdoor port and magic numbers loaded in registers.
6703 *
6704 * @returns true if it is, false if it isn't.
6705 * @sa hmR0SvmIsMesaDrvGp.
6706 */
6707DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6708{
6709 /* 0xed: IN eAX,dx */
6710 uint8_t abInstr[1];
6711 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
6712 return false;
6713
6714 /* Check that it is #GP(0). */
6715 if (pVmxTransient->uExitIntErrorCode != 0)
6716 return false;
6717
6718 /* Check magic and port. */
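    /* These are presumably the VMware-style backdoor magic ('VMXh' = 0x564d5868 in EAX) and the
       backdoor I/O port ('VX' = 0x5658 in DX) that the mesa driver uses. */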
6719 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
6720 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
6721 if (pCtx->rax != UINT32_C(0x564d5868))
6722 return false;
6723 if (pCtx->dx != UINT32_C(0x5658))
6724 return false;
6725
6726 /* Flat ring-3 CS. */
6727 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
6728 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
6729 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
6730 if (pCtx->cs.Attr.n.u2Dpl != 3)
6731 return false;
6732 if (pCtx->cs.u64Base != 0)
6733 return false;
6734
6735 /* Check opcode. */
6736 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
6737 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
6738 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
6739 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
6740 if (RT_FAILURE(rc))
6741 return false;
6742 if (abInstr[0] != 0xed)
6743 return false;
6744
6745 return true;
6746}
6747
6748
6749/**
6750 * VM-exit exception handler for \#GP (General-protection exception).
6751 *
6752 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6753 */
6754static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6755{
6756 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6757 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
6758
6759 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6760 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6761#ifndef IN_NEM_DARWIN
6762 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
6763 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
6764 { /* likely */ }
6765 else
6766#endif
6767 {
6768#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6769# ifndef IN_NEM_DARWIN
6770 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6771# else
6772 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6773# endif
6774#endif
6775 /*
6776 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
6777 * executing a nested-guest, reflect #GP to the guest or nested-guest.
6778 */
6779 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6780 AssertRCReturn(rc, rc);
6781 Log4Func(("Gst: cs:rip=%#04x:%#RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
6782 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
6783
6784 if ( pVmxTransient->fIsNestedGuest
6785 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
6786 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
6787 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6788 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6789 else
6790 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
6791 return rc;
6792 }
6793
6794#ifndef IN_NEM_DARWIN
6795 Assert(CPUMIsGuestInRealModeEx(pCtx));
6796 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
6797 Assert(!pVmxTransient->fIsNestedGuest);
6798
6799 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6800 AssertRCReturn(rc, rc);
6801
6802 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
6803 if (rcStrict == VINF_SUCCESS)
6804 {
6805 if (!CPUMIsGuestInRealModeEx(pCtx))
6806 {
6807 /*
6808 * The guest is no longer in real-mode, check if we can continue executing the
6809 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
6810 */
6811 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
6812 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
6813 {
6814 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
6815 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6816 }
6817 else
6818 {
6819 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
6820 rcStrict = VINF_EM_RESCHEDULE;
6821 }
6822 }
6823 else
6824 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6825 }
6826 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6827 {
6828 rcStrict = VINF_SUCCESS;
6829 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6830 }
6831 return VBOXSTRICTRC_VAL(rcStrict);
6832#endif
6833}
6834
6835
6836/**
6837 * VM-exit exception handler wrapper for all other exceptions that are not handled
6838 * by a specific handler.
6839 *
6840 * This simply re-injects the exception back into the VM without any special
6841 * processing.
6842 *
6843 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6844 */
6845static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6846{
6847 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6848
6849#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6850# ifndef IN_NEM_DARWIN
6851 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6852 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
6853 ("uVector=%#x u32XcptBitmap=%#X32\n",
6854 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
6855 NOREF(pVmcsInfo);
6856# endif
6857#endif
6858
6859 /*
6860 * Re-inject the exception into the guest. This cannot be a double-fault condition which
6861 * would have been handled while checking exits due to event delivery.
6862 */
6863 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6864
6865#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6866 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6867 AssertRCReturn(rc, rc);
6868 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6869#endif
6870
6871#ifdef VBOX_WITH_STATISTICS
6872 switch (uVector)
6873 {
6874 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
6875 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
6876 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
6877 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6878 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
6879 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
6880 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6881 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
6882 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
6883 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
6884 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
6885 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
6886 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
6887 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
6888 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
6889 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
6890 default:
6891 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
6892 break;
6893 }
6894#endif
6895
6896 /* We should never call this function for a page-fault; we'd need to pass on the fault address below otherwise. */
6897 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
6898 NOREF(uVector);
6899
6900 /* Re-inject the original exception into the guest. */
6901 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6902 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6903 return VINF_SUCCESS;
6904}
6905
6906
6907/**
6908 * VM-exit exception handler for all exceptions (except NMIs!).
6909 *
6910 * @remarks This may be called for both guests and nested-guests. Take care to not
6911 * make assumptions and avoid doing anything that is not relevant when
6912 * executing a nested-guest (e.g., Mesa driver hacks).
6913 */
6914static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6915{
6916 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6917
6918 /*
6919 * If this VM-exit occurred while delivering an event through the guest IDT, take
6920 * action based on the return code and additional hints (e.g. for page-faults)
6921 * that will be updated in the VMX transient structure.
6922 */
6923 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
6924 if (rcStrict == VINF_SUCCESS)
6925 {
6926 /*
6927 * If an exception caused a VM-exit due to delivery of an event, the original
6928 * event may have to be re-injected into the guest. We shall reinject it and
6929 * continue guest execution. However, a page-fault is a complicated case and
6930 * needs additional processing done in vmxHCExitXcptPF().
6931 */
6932 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
6933 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6934 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
6935 || uVector == X86_XCPT_PF)
6936 {
6937 switch (uVector)
6938 {
6939 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
6940 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
6941 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
6942 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
6943 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
6944 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
6945 default:
6946 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
6947 }
6948 }
6949 /* else: inject pending event before resuming guest execution. */
6950 }
6951 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
6952 {
6953 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6954 rcStrict = VINF_SUCCESS;
6955 }
6956
6957 return rcStrict;
6958}
6959/** @} */
6960
6961
6962/** @name VM-exit handlers.
6963 * @{
6964 */
6965/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6966/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6967/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6968
6969/**
6970 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
6971 */
6972HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6973{
6974 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6975 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
6976
6977#ifndef IN_NEM_DARWIN
6978 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
6979 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
6980 return VINF_SUCCESS;
6981 return VINF_EM_RAW_INTERRUPT;
6982#else
6983 return VINF_SUCCESS;
6984#endif
6985}
6986
6987
6988/**
6989 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
6990 * VM-exit.
6991 */
6992HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6993{
6994 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6995 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
6996
6997 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
6998
6999 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7000 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7001 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7002
7003 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7004 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7005 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7006 NOREF(pVmcsInfo);
7007
7008 VBOXSTRICTRC rcStrict;
7009 switch (uExitIntType)
7010 {
7011#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7012 /*
7013 * Host physical NMIs:
7014 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7015 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7016 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7017 *
7018 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7019 * See Intel spec. 27.5.5 "Updating Non-Register State".
7020 */
7021 case VMX_EXIT_INT_INFO_TYPE_NMI:
7022 {
7023 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7024 break;
7025 }
7026#endif
7027
7028 /*
7029 * Privileged software exceptions (#DB from ICEBP),
7030 * Software exceptions (#BP and #OF),
7031 * Hardware exceptions:
7032 * Process the required exceptions and resume guest execution if possible.
7033 */
7034 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7035 Assert(uVector == X86_XCPT_DB);
7036 RT_FALL_THRU();
7037 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7038 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7039 RT_FALL_THRU();
7040 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7041 {
7042 NOREF(uVector);
7043 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
7044 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7045 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
7046 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
7047
7048 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7049 break;
7050 }
7051
7052 default:
7053 {
7054 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7055 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7056 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7057 break;
7058 }
7059 }
7060
7061 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7062 return rcStrict;
7063}
7064
7065
7066/**
7067 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7068 */
7069HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7070{
7071 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7072
7073 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7074 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7075 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7076
7077 /* Evaluate and deliver pending events and resume guest execution. */
7078 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7079 return VINF_SUCCESS;
7080}
7081
7082
7083/**
7084 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7085 */
7086HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7087{
7088 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7089
7090 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7091 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7092 {
7093 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7094 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7095 }
7096
7097 Assert(!CPUMIsGuestNmiBlocking(pVCpu));
7098
7099 /*
7100 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7101 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7102 */
7103 uint32_t fIntrState;
7104 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7105 AssertRC(rc);
7106 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7107 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7108 {
7109 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
7110 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
7111
7112 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7113 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7114 AssertRC(rc);
7115 }
7116
7117 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7118 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7119
7120 /* Evaluate and deliver pending events and resume guest execution. */
7121 return VINF_SUCCESS;
7122}
7123
7124
7125/**
7126 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7127 */
7128HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7129{
7130 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
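/* No host-side cache flushing is done on behalf of the guest here; simply skip the instruction. */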
7131 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7132}
7133
7134
7135/**
7136 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7137 */
7138HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7139{
7140 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7141 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7142}
7143
7144
7145/**
7146 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7147 */
7148HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7149{
7150 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7151
7152 /*
7153 * Get the state we need and update the exit history entry.
7154 */
7155 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7156 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7157
7158 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7159 AssertRCReturn(rc, rc);
7160
7161 VBOXSTRICTRC rcStrict;
7162 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7163 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7164 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7165 if (!pExitRec)
7166 {
7167 /*
7168 * Regular CPUID instruction execution.
7169 */
7170 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7171 if (rcStrict == VINF_SUCCESS)
7172 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7173 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7174 {
7175 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7176 rcStrict = VINF_SUCCESS;
7177 }
7178 }
7179 else
7180 {
7181 /*
7182 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7183 */
7184 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7185 AssertRCReturn(rc2, rc2);
7186
7187 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7188 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7189
7190 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7191 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7192
7193 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7194 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7195 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7196 }
7197 return rcStrict;
7198}
7199
7200
7201/**
7202 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7203 */
7204HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7205{
7206 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7207
7208 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7209 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4);
7210 AssertRCReturn(rc, rc);
7211
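/* GETSEC can only be executed (and thus cause this VM-exit) when CR4.SMXE is set; in that case hand the instruction over for emulation. */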
7212 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7213 return VINF_EM_RAW_EMULATE_INSTR;
7214
7215 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7216 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7217}
7218
7219
7220/**
7221 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7222 */
7223HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7224{
7225 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7226
7227 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7228 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7229 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
7230 AssertRCReturn(rc, rc);
7231
7232 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7233 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7234 {
7235 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7236 we must reset offsetting on VM-entry. See @bugref{6634}. */
7237 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7238 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7239 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7240 }
7241 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7242 {
7243 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7244 rcStrict = VINF_SUCCESS;
7245 }
7246 return rcStrict;
7247}
7248
7249
7250/**
7251 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7252 */
7253HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7254{
7255 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7256
7257 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7258 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7259 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
7260 AssertRCReturn(rc, rc);
7261
7262 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7263 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7264 {
7265 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7266 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7267 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7268 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7269 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7270 }
7271 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7272 {
7273 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7274 rcStrict = VINF_SUCCESS;
7275 }
7276 return rcStrict;
7277}
7278
7279
7280/**
7281 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7282 */
7283HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7284{
7285 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7286
7287 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7288 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0
7289 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
7290 AssertRCReturn(rc, rc);
7291
7292 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
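/* Let EM interpret RDPMC; its privilege checks need CR0, CR4 (PCE), RFLAGS and SS (CPL), which were imported above. */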
7293 rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
7294 if (RT_LIKELY(rc == VINF_SUCCESS))
7295 {
7296 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7297 Assert(pVmxTransient->cbExitInstr == 2);
7298 }
7299 else
7300 {
7301 AssertMsgFailed(("vmxHCExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
7302 rc = VERR_EM_INTERPRETER;
7303 }
7304 return rc;
7305}
7306
7307
7308/**
7309 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7310 */
7311HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7312{
7313 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7314
7315 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7316 if (EMAreHypercallInstructionsEnabled(pVCpu))
7317 {
7318 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7319 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
7320 | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
7321 AssertRCReturn(rc, rc);
7322
7323 /* Perform the hypercall. */
7324 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7325 if (rcStrict == VINF_SUCCESS)
7326 {
7327 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7328 AssertRCReturn(rc, rc);
7329 }
7330 else
7331 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7332 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7333 || RT_FAILURE(rcStrict));
7334
7335 /* If the hypercall changes anything other than the guest's general-purpose registers,
7336 we would need to reload the changed guest state here before VM-entry. */
7337 }
7338 else
7339 Log4Func(("Hypercalls not enabled\n"));
7340
7341 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7342 if (RT_FAILURE(rcStrict))
7343 {
7344 vmxHCSetPendingXcptUD(pVCpu);
7345 rcStrict = VINF_SUCCESS;
7346 }
7347
7348 return rcStrict;
7349}
7350
7351
7352/**
7353 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7354 */
7355HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7356{
7357 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7358#ifndef IN_NEM_DARWIN
7359 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7360#endif
7361
7362 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7363 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7364 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7365 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7366 AssertRCReturn(rc, rc);
7367
7368 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7369
7370 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7371 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7372 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7373 {
7374 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7375 rcStrict = VINF_SUCCESS;
7376 }
7377 else
7378 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7379 VBOXSTRICTRC_VAL(rcStrict)));
7380 return rcStrict;
7381}
7382
7383
7384/**
7385 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7386 */
7387HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7388{
7389 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7390
7391 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7392 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7393 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
7394 AssertRCReturn(rc, rc);
7395
7396 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7397 if (rcStrict == VINF_SUCCESS)
7398 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7399 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7400 {
7401 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7402 rcStrict = VINF_SUCCESS;
7403 }
7404
7405 return rcStrict;
7406}
7407
7408
7409/**
7410 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7411 */
7412HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7413{
7414 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7415
7416 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7417 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7418 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7419 AssertRCReturn(rc, rc);
7420
7421 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
7422 if (RT_SUCCESS(rcStrict))
7423 {
7424 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7425 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7426 rcStrict = VINF_SUCCESS;
7427 }
7428
7429 return rcStrict;
7430}
7431
7432
7433/**
7434 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7435 * VM-exit.
7436 */
7437HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7438{
7439 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
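/* A triple fault is unrecoverable for the guest; request a VM reset. */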
7440 return VINF_EM_RESET;
7441}
7442
7443
7444/**
7445 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7446 */
7447HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7448{
7449 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7450
7451 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7452 AssertRCReturn(rc, rc);
7453
7454 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
7455 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
7456 rc = VINF_SUCCESS;
7457 else
7458 rc = VINF_EM_HALT;
7459
7460 if (rc != VINF_SUCCESS)
7461 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
7462 return rc;
7463}
7464
7465
7466/**
7467 * VM-exit handler for instructions that result in a \#UD exception delivered to
7468 * the guest.
7469 */
7470HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7471{
7472 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7473 vmxHCSetPendingXcptUD(pVCpu);
7474 return VINF_SUCCESS;
7475}
7476
7477
7478/**
7479 * VM-exit handler for expiry of the VMX-preemption timer.
7480 */
7481HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7482{
7483 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7484
7485 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
7486 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7487 Log12(("vmxHCExitPreemptTimer:\n"));
7488
7489 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
7490 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7491 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
7492 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
7493 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
7494}
7495
7496
7497/**
7498 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
7499 */
7500HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7501{
7502 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7503
7504 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7505 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7506 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
7507 AssertRCReturn(rc, rc);
7508
7509 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
7510 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
7511 : HM_CHANGED_RAISED_XCPT_MASK);
7512
7513#ifndef IN_NEM_DARWIN
7514 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
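/* XSETBV may have changed the guest XCR0; re-check whether XCR0 must be swapped on VM-entry/exit and update the start-VM function if that changed. */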
7515 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
7516 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
7517 {
7518 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
7519 hmR0VmxUpdateStartVmFunction(pVCpu);
7520 }
7521#endif
7522
7523 return rcStrict;
7524}
7525
7526
7527/**
7528 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
7529 */
7530HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7531{
7532 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7533
7534 /** @todo Enable the new code after finding a reliable guest test-case. */
7535#if 1
7536 return VERR_EM_INTERPRETER;
7537#else
7538 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7539 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
7540 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7541 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
7542 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7543 AssertRCReturn(rc, rc);
7544
7545 /* Paranoia. Ensure this has a memory operand. */
7546 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
7547
7548 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
7549 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
7550 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
7551 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
7552
7553 RTGCPTR GCPtrDesc;
7554 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
7555
7556 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
7557 GCPtrDesc, uType);
7558 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7559 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7560 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7561 {
7562 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7563 rcStrict = VINF_SUCCESS;
7564 }
7565 return rcStrict;
7566#endif
7567}
7568
7569
7570/**
7571 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
7572 * VM-exit.
7573 */
7574HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7575{
7576 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7577 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7578 AssertRCReturn(rc, rc);
7579
7580 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
7581 if (RT_FAILURE(rc))
7582 return rc;
7583
7584 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
7585 NOREF(uInvalidReason);
7586
7587#ifdef VBOX_STRICT
7588 uint32_t fIntrState;
7589 uint64_t u64Val;
7590 vmxHCReadEntryIntInfoVmcs(pVCpu, pVmxTransient);
7591 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
7592 vmxHCReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
7593
7594 Log4(("uInvalidReason %u\n", uInvalidReason));
7595 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
7596 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
7597 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
7598
7599 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
7600 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
7601 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
7602 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
7603 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
7604 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
7605 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
7606 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
7607 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
7608 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
7609 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
7610 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
7611# ifndef IN_NEM_DARWIN
7612 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
7613 {
7614 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
7615 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
7616 }
7617
7618 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
7619# endif
7620#endif
7621
7622 return VERR_VMX_INVALID_GUEST_STATE;
7623}
7624
7625/**
7626 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
7627 */
7628HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7629{
7630 /*
7631 * Cumulative notes of all recognized but unexpected VM-exits.
7632 *
7633 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
7634 * nested-paging is used.
7635 *
7636 * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
7637 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
7638 * this function (and thereby stop VM execution) for handling such instructions.
7639 *
7640 *
7641 * VMX_EXIT_INIT_SIGNAL:
7642 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
7643 * They are -NOT- blocked in VMX non-root operation so we can, in theory, still get these
7644 * VM-exits. However, we should not receive INIT signal VM-exits while executing a VM.
7645 *
7646 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
7647 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
7648 * See Intel spec. "23.8 Restrictions on VMX operation".
7649 *
7650 * VMX_EXIT_SIPI:
7651 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
7652 * activity state is used. We don't make use of it as our guests don't have direct
7653 * access to the host local APIC.
7654 *
7655 * See Intel spec. 25.3 "Other Causes of VM-exits".
7656 *
7657 * VMX_EXIT_IO_SMI:
7658 * VMX_EXIT_SMI:
7659 * This can only happen if we support dual-monitor treatment of SMI, which can be
7660 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
7661 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
7662 * VMX root mode or receive an SMI. If we get here, something funny is going on.
7663 *
7664 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
7665 * See Intel spec. 25.3 "Other Causes of VM-Exits"
7666 *
7667 * VMX_EXIT_ERR_MSR_LOAD:
7668 * Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
7669 * and typically indicate a bug in the hypervisor code. We thus cannot resume
7670 * execution.
7671 *
7672 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
7673 *
7674 * VMX_EXIT_ERR_MACHINE_CHECK:
7675 * Machine-check exceptions indicate a fatal/unrecoverable hardware condition
7676 * including but not limited to system bus, ECC, parity, cache and TLB errors. An
7677 * abort-class #MC exception is raised. We thus cannot assume a
7678 * reasonable chance of continuing any sort of execution and we bail.
7679 *
7680 * See Intel spec. 15.1 "Machine-check Architecture".
7681 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
7682 *
7683 * VMX_EXIT_PML_FULL:
7684 * VMX_EXIT_VIRTUALIZED_EOI:
7685 * VMX_EXIT_APIC_WRITE:
7686 * We do not currently support any of these features and thus they are all unexpected
7687 * VM-exits.
7688 *
7689 * VMX_EXIT_GDTR_IDTR_ACCESS:
7690 * VMX_EXIT_LDTR_TR_ACCESS:
7691 * VMX_EXIT_RDRAND:
7692 * VMX_EXIT_RSM:
7693 * VMX_EXIT_VMFUNC:
7694 * VMX_EXIT_ENCLS:
7695 * VMX_EXIT_RDSEED:
7696 * VMX_EXIT_XSAVES:
7697 * VMX_EXIT_XRSTORS:
7698 * VMX_EXIT_UMWAIT:
7699 * VMX_EXIT_TPAUSE:
7700 * VMX_EXIT_LOADIWKEY:
7701 * These VM-exits are -not- caused unconditionally by execution of the corresponding
7702 * instruction. Any VM-exit for these instructions indicates a hardware problem,
7703 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
7704 *
7705 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
7706 */
7707 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7708 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
7709 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7710}
7711
7712
7713/**
7714 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
7715 */
7716HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7717{
7718 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7719
7720 /** @todo Optimize this: We currently drag in the whole MSR state
7721 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only fetch the
7722 * MSRs required. That would require changes to IEM and possibly CPUM too.
7723 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7724 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7725 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7726 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
7727 switch (idMsr)
7728 {
7729 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7730 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7731 }
7732
7733 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7734 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7735 AssertRCReturn(rc, rc);
7736
7737 Log4Func(("ecx=%#RX32\n", idMsr));
7738
7739#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7740 Assert(!pVmxTransient->fIsNestedGuest);
7741 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
7742 {
7743 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
7744 && idMsr != MSR_K6_EFER)
7745 {
7746 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
7747 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7748 }
7749 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7750 {
7751 Assert(pVmcsInfo->pvMsrBitmap);
7752 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7753 if (fMsrpm & VMXMSRPM_ALLOW_RD)
7754 {
7755 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
7756 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7757 }
7758 }
7759 }
7760#endif
7761
7762 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
7763 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
7764 if (rcStrict == VINF_SUCCESS)
7765 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7766 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7767 {
7768 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7769 rcStrict = VINF_SUCCESS;
7770 }
7771 else
7772 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
7773 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7774
7775 return rcStrict;
7776}
7777
7778
7779/**
7780 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
7781 */
7782HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7783{
7784 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7785
7786 /** @todo Optimize this: We currently drag in the whole MSR state
7787 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only fetch the
7788 * MSRs required. That would require changes to IEM and possibly CPUM too.
7789 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7790 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7791 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
7792
7793 /*
7794 * The FS and GS base MSRs are not part of the above all-MSRs mask.
7795 * Although we don't need to fetch the base itself (it will be overwritten shortly),
7796 * loading the guest state also loads the entire segment register, including the limit
7797 * and attributes, so we need to import them here.
7798 */
7799 switch (idMsr)
7800 {
7801 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7802 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7803 }
7804
7805 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7806 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7807 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7808 AssertRCReturn(rc, rc);
7809
7810 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
7811
7812 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
7813 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
7814
7815 if (rcStrict == VINF_SUCCESS)
7816 {
7817 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7818
7819 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
7820 if ( idMsr == MSR_IA32_APICBASE
7821 || ( idMsr >= MSR_IA32_X2APIC_START
7822 && idMsr <= MSR_IA32_X2APIC_END))
7823 {
7824 /*
7825 * We've already saved the APIC-related guest-state (TPR) in the post-run phase.
7826 * When full APIC register virtualization is implemented we'll have to make
7827 * sure APIC state is saved from the VMCS before IEM changes it.
7828 */
7829 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7830 }
7831 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
7832 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7833 else if (idMsr == MSR_K6_EFER)
7834 {
7835 /*
7836 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
7837 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
7838 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
7839 */
7840 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
7841 }
7842
7843 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
7844 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
7845 {
7846 switch (idMsr)
7847 {
7848 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
7849 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
7850 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
7851 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
7852 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
7853 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
7854 default:
7855 {
7856#ifndef IN_NEM_DARWIN
7857 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7858 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
7859 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7860 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
7861#else
7862 AssertMsgFailed(("TODO\n"));
7863#endif
7864 break;
7865 }
7866 }
7867 }
7868#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7869 else
7870 {
7871 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
7872 switch (idMsr)
7873 {
7874 case MSR_IA32_SYSENTER_CS:
7875 case MSR_IA32_SYSENTER_EIP:
7876 case MSR_IA32_SYSENTER_ESP:
7877 case MSR_K8_FS_BASE:
7878 case MSR_K8_GS_BASE:
7879 {
7880 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
7881 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7882 }
7883
7884 /* Writes to MSRs in the auto-load/store area or to swapped MSRs shouldn't cause VM-exits when MSR-bitmaps are used. */
7885 default:
7886 {
7887 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7888 {
7889 /* EFER MSR writes are always intercepted. */
7890 if (idMsr != MSR_K6_EFER)
7891 {
7892 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
7893 idMsr));
7894 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7895 }
7896 }
7897
7898 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7899 {
7900 Assert(pVmcsInfo->pvMsrBitmap);
7901 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7902 if (fMsrpm & VMXMSRPM_ALLOW_WR)
7903 {
7904 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
7905 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7906 }
7907 }
7908 break;
7909 }
7910 }
7911 }
7912#endif /* VBOX_STRICT */
7913 }
7914 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7915 {
7916 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7917 rcStrict = VINF_SUCCESS;
7918 }
7919 else
7920 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
7921 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7922
7923 return rcStrict;
7924}
7925
7926
7927/**
7928 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
7929 */
7930HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7931{
7932 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7933
7934 /** @todo The guest has likely hit a contended spinlock. We might want to
7935 * poke or schedule a different guest VCPU. */
7936 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7937 if (RT_SUCCESS(rc))
7938 return VINF_EM_RAW_INTERRUPT;
7939
7940 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
7941 return rc;
7942}
7943
7944
7945/**
7946 * VM-exit handler for when the TPR value is lowered below the specified
7947 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
7948 */
7949HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7950{
7951 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7952 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
7953
7954 /*
7955 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
7956 * We'll re-evaluate pending interrupts and inject them before the next VM
7957 * entry so we can just continue execution here.
7958 */
7959 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
7960 return VINF_SUCCESS;
7961}
7962
7963
7964/**
7965 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
7966 * VM-exit.
7967 *
7968 * @retval VINF_SUCCESS when guest execution can continue.
7969 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
7970 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
7971 * incompatible guest state for VMX execution (real-on-v86 case).
7972 */
7973HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7974{
7975 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7976 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
7977
7978 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7979 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7980 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7981
7982 VBOXSTRICTRC rcStrict;
7983 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7984 uint64_t const uExitQual = pVmxTransient->uExitQual;
7985 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
7986 switch (uAccessType)
7987 {
7988 /*
7989 * MOV to CRx.
7990 */
7991 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
7992 {
7993 /*
7994 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
7995 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
7996 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
7997 * PAE PDPTEs as well.
7998 */
7999 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8000 AssertRCReturn(rc, rc);
8001
8002 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8003#ifndef IN_NEM_DARWIN
8004 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8005#endif
8006 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8007 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8008
8009 /*
8010 * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8011 * - When nested paging isn't used.
8012 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8013 * - We are executing in the VM debug loop.
8014 */
8015#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8016# ifndef IN_NEM_DARWIN
8017 Assert( iCrReg != 3
8018 || !VM_IS_VMX_NESTED_PAGING(pVM)
8019 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8020 || pVCpu->hmr0.s.fUsingDebugLoop);
8021# else
8022 Assert( iCrReg != 3
8023 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8024# endif
8025#endif
8026
8027 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8028 Assert( iCrReg != 8
8029 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8030
8031 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8032 AssertMsg( rcStrict == VINF_SUCCESS
8033 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8034
8035#ifndef IN_NEM_DARWIN
8036 /*
8037 * This is a kludge for handling switches back to real mode when we try to use
8038 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8039 * deal with special selector values, so we have to return to ring-3 and run
8040 * there till the selector values are V86 mode compatible.
8041 *
8042 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8043 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8044 * this function.
8045 */
8046 if ( iCrReg == 0
8047 && rcStrict == VINF_SUCCESS
8048 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8049 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8050 && (uOldCr0 & X86_CR0_PE)
8051 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8052 {
8053 /** @todo Check selectors rather than returning all the time. */
8054 Assert(!pVmxTransient->fIsNestedGuest);
8055 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8056 rcStrict = VINF_EM_RESCHEDULE_REM;
8057 }
8058#endif
8059
8060 break;
8061 }
8062
8063 /*
8064 * MOV from CRx.
8065 */
8066 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8067 {
8068 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8069 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8070
8071 /*
8072 * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8073 * - When nested paging isn't used.
8074 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8075 * - We are executing in the VM debug loop.
8076 */
8077#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8078# ifndef IN_NEM_DARWIN
8079 Assert( iCrReg != 3
8080 || !VM_IS_VMX_NESTED_PAGING(pVM)
8081 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8082 || pVCpu->hmr0.s.fLeaveDone);
8083# else
8084 Assert( iCrReg != 3
8085 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8086# endif
8087#endif
8088
8089 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8090 Assert( iCrReg != 8
8091 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8092
8093 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8094 break;
8095 }
8096
8097 /*
8098 * CLTS (Clear Task-Switch Flag in CR0).
8099 */
8100 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8101 {
8102 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8103 break;
8104 }
8105
8106 /*
8107 * LMSW (Load Machine-Status Word into CR0).
8108 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8109 */
8110 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8111 {
8112 RTGCPTR GCPtrEffDst;
8113 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8114 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8115 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8116 if (fMemOperand)
8117 {
8118 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
8119 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8120 }
8121 else
8122 GCPtrEffDst = NIL_RTGCPTR;
8123 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8124 break;
8125 }
8126
8127 default:
8128 {
8129 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8130 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8131 }
8132 }
8133
8134 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8135 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8136 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8137
8138 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8139 NOREF(pVM);
8140 return rcStrict;
8141}
8142
8143
8144/**
8145 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8146 * VM-exit.
8147 */
8148HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8149{
8150 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8151 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8152
8153 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8154 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8155 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8156 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8157 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK
8158 | CPUMCTX_EXTRN_EFER);
8159 /* EFER MSR also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8160 AssertRCReturn(rc, rc);
8161
8162 /* See Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
8163 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8164 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8165 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8166 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8167 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8168 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8169 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8170
8171 /*
8172 * Update exit history to see if this exit can be optimized.
8173 */
8174 VBOXSTRICTRC rcStrict;
8175 PCEMEXITREC pExitRec = NULL;
8176 if ( !fGstStepping
8177 && !fDbgStepping)
8178 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8179 !fIOString
8180 ? !fIOWrite
8181 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8182 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8183 : !fIOWrite
8184 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8185 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8186 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8187 if (!pExitRec)
8188 {
8189 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8190 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
8191
8192 uint32_t const cbValue = s_aIOSizes[uIOSize];
8193 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8194 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8195 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8196 if (fIOString)
8197 {
8198 /*
8199 * INS/OUTS - I/O String instruction.
8200 *
8201 * Use instruction-information if available, otherwise fall back on
8202 * interpreting the instruction.
8203 */
8204 Log4Func(("cs:rip=%#04x:%#RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8205 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8206 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8207 if (fInsOutsInfo)
8208 {
8209 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8210 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8211 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8212 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8213 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8214 if (fIOWrite)
8215 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8216 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8217 else
8218 {
8219 /*
8220 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8221 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8222 * See Intel Instruction spec. for "INS".
8223 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8224 */
8225 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8226 }
8227 }
8228 else
8229 rcStrict = IEMExecOne(pVCpu);
8230
8231 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8232 fUpdateRipAlready = true;
8233 }
8234 else
8235 {
8236 /*
8237 * IN/OUT - I/O instruction.
8238 */
8239 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8240 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8241 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8242 if (fIOWrite)
8243 {
8244 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8245 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8246#ifndef IN_NEM_DARWIN
8247 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8248 && !pCtx->eflags.Bits.u1TF)
8249 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8250#endif
8251 }
8252 else
8253 {
8254 uint32_t u32Result = 0;
8255 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8256 if (IOM_SUCCESS(rcStrict))
8257 {
8258 /* Save result of I/O IN instr. in AL/AX/EAX. */
8259 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8260 }
8261#ifndef IN_NEM_DARWIN
8262 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8263 && !pCtx->eflags.Bits.u1TF)
8264 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8265#endif
8266 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8267 }
8268 }
8269
8270 if (IOM_SUCCESS(rcStrict))
8271 {
8272 if (!fUpdateRipAlready)
8273 {
8274 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8275 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8276 }
8277
8278 /*
8279 * INS/OUTS with a REP prefix updates RFLAGS; this can be observed with a triple-fault guru
8280 * while booting a Fedora 17 64-bit guest.
8281 *
8282 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8283 */
8284 if (fIOString)
8285 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8286
8287 /*
8288 * If any I/O breakpoints are armed, we need to check if one triggered
8289 * and take appropriate action.
8290 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8291 */
8292 rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_DR7);
8293 AssertRCReturn(rc, rc);
8294
8295 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8296 * execution engines about whether hyper BPs and such are pending. */
8297 uint32_t const uDr7 = pCtx->dr[7];
8298 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8299 && X86_DR7_ANY_RW_IO(uDr7)
8300 && (pCtx->cr4 & X86_CR4_DE))
8301 || DBGFBpIsHwIoArmed(pVM)))
8302 {
8303 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8304
8305#ifndef IN_NEM_DARWIN
8306 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8307 VMMRZCallRing3Disable(pVCpu);
8308 HM_DISABLE_PREEMPT(pVCpu);
8309
8310 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8311
8312 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8313 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8314 {
8315 /* Raise #DB. */
8316 if (fIsGuestDbgActive)
8317 ASMSetDR6(pCtx->dr[6]);
8318 if (pCtx->dr[7] != uDr7)
8319 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8320
8321 vmxHCSetPendingXcptDB(pVCpu);
8322 }
8323 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8324 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8325 else if ( rcStrict2 != VINF_SUCCESS
8326 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8327 rcStrict = rcStrict2;
8328 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8329
8330 HM_RESTORE_PREEMPT();
8331 VMMRZCallRing3Enable(pVCpu);
8332#else
8333 /** @todo */
8334#endif
8335 }
8336 }
8337
8338#ifdef VBOX_STRICT
8339 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8340 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8341 Assert(!fIOWrite);
8342 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8343 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8344 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8345 Assert(fIOWrite);
8346 else
8347 {
8348# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8349 * statuses, that the VMM device and some others may return. See
8350 * IOM_SUCCESS() for guidance. */
8351 AssertMsg( RT_FAILURE(rcStrict)
8352 || rcStrict == VINF_SUCCESS
8353 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8354 || rcStrict == VINF_EM_DBG_BREAKPOINT
8355 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8356 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8357# endif
8358 }
8359#endif
8360 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8361 }
8362 else
8363 {
8364 /*
8365 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8366 */
8367 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8368 AssertRCReturn(rc2, rc2);
8369 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8370 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8371 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8372 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8373 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8374 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8375
8376 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8377 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8378
8379 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8380 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8381 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8382 }
8383 return rcStrict;
8384}
8385
8386
8387/**
8388 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8389 * VM-exit.
8390 */
8391HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8392{
8393 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8394
8395 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8396 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8397 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
8398 {
8399 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8400 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
8401 {
8402 uint32_t uErrCode;
8403 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
8404 {
8405 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8406 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
8407 }
8408 else
8409 uErrCode = 0;
8410
8411 RTGCUINTPTR GCPtrFaultAddress;
8412 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
8413 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
8414 else
8415 GCPtrFaultAddress = 0;
8416
8417 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8418
8419 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
8420 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
8421
8422 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
8423 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
8424 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8425 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8426 }
8427 }
8428
8429 /* Fall back to the interpreter to emulate the task-switch. */
8430 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8431 return VERR_EM_INTERPRETER;
8432}
8433
8434
8435/**
8436 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
8437 */
8438HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8439{
8440 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8441
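/* The monitor-trap flag is armed by us for single-stepping; disarm it and report the completed step to the debugger. */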
8442 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8443 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
8444 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8445 AssertRC(rc);
8446 return VINF_EM_DBG_STEPPED;
8447}
8448
8449
8450/**
8451 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
8452 */
8453HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8454{
8455 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8456 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
8457
8458 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8459 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8460 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8461 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8462 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8463
8464 /*
8465 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8466 */
8467 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8468 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8469 {
8470 /* For some crazy guests, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
8471 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
8472 {
8473 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8474 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8475 }
8476 }
8477 else
8478 {
8479 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8480 return rcStrict;
8481 }
8482
8483 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
8484 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8485 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8486 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8487 AssertRCReturn(rc, rc);
8488
8489 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses". */
8490 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
8491 switch (uAccessType)
8492 {
8493#ifndef IN_NEM_DARWIN
8494 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
8495 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
8496 {
8497 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
8498 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
8499 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
8500
8501 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
8502 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
8503 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
8504 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
8505 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
8506
8507 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
8508 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
8509 Log4Func(("IOMMMIOPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8510 if ( rcStrict == VINF_SUCCESS
8511 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8512 || rcStrict == VERR_PAGE_NOT_PRESENT)
8513 {
8514 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8515 | HM_CHANGED_GUEST_APIC_TPR);
8516 rcStrict = VINF_SUCCESS;
8517 }
8518 break;
8519 }
8520#else
8521 /** @todo */
8522#endif
8523
8524 default:
8525 {
8526 Log4Func(("uAccessType=%#x\n", uAccessType));
8527 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
8528 break;
8529 }
8530 }
8531
8532 if (rcStrict != VINF_SUCCESS)
8533 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
8534 return rcStrict;
8535}
8536
8537
8538/**
8539 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
8540 * VM-exit.
8541 */
8542HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8543{
8544 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8545 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8546
8547 /*
8548 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
8549 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
8550 * must emulate the MOV DRx access.
8551 */
8552 if (!pVmxTransient->fIsNestedGuest)
8553 {
8554 /* We should -not- get this VM-exit if the guest's debug registers were active. */
8555 if (pVmxTransient->fWasGuestDebugStateActive)
8556 {
8557 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
8558 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8559 }
8560
8561 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
8562 && !pVmxTransient->fWasHyperDebugStateActive)
8563 {
8564 Assert(!DBGFIsStepping(pVCpu));
8565 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
8566
8567 /* Don't intercept MOV DRx any more. */
8568 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
8569 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8570 AssertRC(rc);
8571
8572#ifndef IN_NEM_DARWIN
8573 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
8574 VMMRZCallRing3Disable(pVCpu);
8575 HM_DISABLE_PREEMPT(pVCpu);
8576
8577 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
8578 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
8579 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8580
8581 HM_RESTORE_PREEMPT();
8582 VMMRZCallRing3Enable(pVCpu);
8583#else
8584 CPUMR3NemActivateGuestDebugState(pVCpu);
8585 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8586 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
8587#endif
8588
8589#ifdef VBOX_WITH_STATISTICS
8590 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8591 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8592 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8593 else
8594 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8595#endif
8596 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
8597 return VINF_SUCCESS;
8598 }
8599 }
8600
8601 /*
8602 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires the EFER MSR and CS.
8603 * The EFER MSR is always up-to-date.
8604 * Update the segment registers and DR7 from the CPU.
8605 */
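    /* For reference: the MOV DRx exit qualification encodes the debug register in bits 2:0, the access
       direction in bit 4 (0 = MOV to DRx, 1 = MOV from DRx) and the general-purpose register in bits 11:8;
       the VMX_EXIT_QUAL_DRX_* macros used below extract these fields. */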
8606 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8607 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8608 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
8609 AssertRCReturn(rc, rc);
8610 Log4Func(("cs:rip=%#04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
8611
8612 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8613 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8614 {
8615 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8616 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
8617 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
8618 if (RT_SUCCESS(rc))
8619 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
8620 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8621 }
8622 else
8623 {
8624 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8625 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
8626 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
8627 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8628 }
8629
8630 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8631 if (RT_SUCCESS(rc))
8632 {
8633 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8634 AssertRCReturn(rc2, rc2);
8635 return VINF_SUCCESS;
8636 }
8637 return rc;
8638}
8639
8640
8641/**
8642 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
8643 * Conditional VM-exit.
8644 */
8645HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8646{
8647 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8648
8649#ifndef IN_NEM_DARWIN
8650 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8651
8652 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8653 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8654 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8655 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8656 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8657
8658 /*
8659 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8660 */
8661 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8662 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8663 {
8664 /*
8665 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
8666 * instruction emulation to inject the original event. Otherwise, injecting the original event
8667 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
8668 */
8669 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8670 { /* likely */ }
8671 else
8672 {
8673 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8674#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8675 /** @todo NSTVMX: Think about how this should be handled. */
8676 if (pVmxTransient->fIsNestedGuest)
8677 return VERR_VMX_IPE_3;
8678#endif
8679 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8680 }
8681 }
8682 else
8683 {
8684 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8685 return rcStrict;
8686 }
8687
8688 /*
8689 * Get sufficient state and update the exit history entry.
8690 */
8691 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8692 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8693 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8694 AssertRCReturn(rc, rc);
8695
8696 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8697 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8698 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
8699 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8700 if (!pExitRec)
8701 {
8702 /*
8703 * If we succeed, resume guest execution.
8704 * If we fail to interpret the instruction because we couldn't get the guest-physical address
8705 * of the page containing the instruction via the guest's page tables (we would have invalidated
8706 * the guest page in the host TLB), resume execution anyway; the resulting guest page fault lets
8707 * the guest handle this weird case. See @bugref{6043}.
8708 */
8709 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8710 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8711/** @todo bird: We can probably just go straight to IOM here and assume that
8712 * it's MMIO, then fall back on PGM if that hunch didn't work out so
8713 * well. However, we need to address that aliasing workarounds that
8714 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
8715 *
8716 * Might also be interesting to see if we can get this done more or
8717 * less locklessly inside IOM. Need to consider the lookup table
8718 * updating and use a bit more carefully first (or do all updates via
8719 * rendezvous) */
8720 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
8721 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
8722 if ( rcStrict == VINF_SUCCESS
8723 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8724 || rcStrict == VERR_PAGE_NOT_PRESENT)
8725 {
8726 /* Successfully handled MMIO operation. */
8727 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8728 | HM_CHANGED_GUEST_APIC_TPR);
8729 rcStrict = VINF_SUCCESS;
8730 }
8731 }
8732 else
8733 {
8734 /*
8735 * Frequent exit or something needing probing. Call EMHistoryExec.
8736 */
8737 Log4(("EptMiscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
8738 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
8739
8740 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8741 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8742
8743 Log4(("EptMiscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8744 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8745 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8746 }
8747 return rcStrict;
8748#else
8749 AssertFailed();
8750 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
8751#endif
8752}
8753
8754
8755/**
8756 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
8757 * VM-exit.
8758 */
8759HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8760{
8761 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8762#ifndef IN_NEM_DARWIN
8763 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8764
8765 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8766 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8767 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8768 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8769 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8770 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8771
8772 /*
8773 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8774 */
8775 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8776 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8777 {
8778 /*
8779 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
8780 * we shall resolve the nested #PF and re-inject the original event.
8781 */
8782 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8783 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
8784 }
8785 else
8786 {
8787 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8788 return rcStrict;
8789 }
8790
8791 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8792 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8793 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8794 AssertRCReturn(rc, rc);
8795
8796 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8797 uint64_t const uExitQual = pVmxTransient->uExitQual;
8798 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
8799
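    /* Synthesize a #PF-style error code from the EPT-violation exit qualification: an instruction fetch
       sets the ID bit, a write access sets the RW bit, and if the faulting translation had any of the
       EPT read/write/execute permissions the page was present, hence the P bit. */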
8800 RTGCUINT uErrorCode = 0;
8801 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
8802 uErrorCode |= X86_TRAP_PF_ID;
8803 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8804 uErrorCode |= X86_TRAP_PF_RW;
8805 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
8806 uErrorCode |= X86_TRAP_PF_P;
8807
8808 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8809 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%#RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
8810
8811 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8812
8813 /*
8814 * Handle the pagefault trap for the nested shadow table.
8815 */
8816 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
8817 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
8818 TRPMResetTrap(pVCpu);
8819
8820 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
8821 if ( rcStrict == VINF_SUCCESS
8822 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8823 || rcStrict == VERR_PAGE_NOT_PRESENT)
8824 {
8825 /* Successfully synced our nested page tables. */
8826 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
8827 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
8828 return VINF_SUCCESS;
8829 }
8830#else
8831 PVM pVM = pVCpu->CTX_SUFF(pVM);
8832 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
8833 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8834 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8835 vmxHCImportGuestRip(pVCpu);
8836 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
8837
8838 /*
8839 * Ask PGM for information about the given GCPhys. We need to check if we're
8840 * out of sync first.
8841 */
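    /* The first initializer below is the write-access flag derived from the exit qualification; the
       remaining two presumably clear the fDidSomething/fCanResume tracking flags that are consulted
       further down (going by how the State fields are used). */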
8842 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE), false, false };
8843 PGMPHYSNEMPAGEINFO Info;
8844 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
8845 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
8846 if (RT_SUCCESS(rc))
8847 {
8848 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8849 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
8850 {
8851 if (State.fCanResume)
8852 {
8853 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
8854 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8855 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8856 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8857 State.fDidSomething ? "" : " no-change"));
8858 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
8859 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8860 return VINF_SUCCESS;
8861 }
8862 }
8863
8864 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
8865 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8866 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8867 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8868 State.fDidSomething ? "" : " no-change"));
8869 }
8870 else
8871 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
8872 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8873 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
8874
8875 /*
8876 * Emulate the memory access, either access handler or special memory.
8877 */
8878 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
8879 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8880 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
8881 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
8882 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8883
8884 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8885 AssertRCReturn(rc, rc);
8886
8887 VBOXSTRICTRC rcStrict;
8888 if (!pExitRec)
8889 rcStrict = IEMExecOne(pVCpu);
8890 else
8891 {
8892 /* Frequent access or probing. */
8893 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8894 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8895 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8896 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8897 }
8898
8899 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8900#endif
8901
8902 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8903 return rcStrict;
8904}
8905
8906
8907#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8908/**
8909 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
8910 */
8911HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8912{
8913 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8914
8915 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8916 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8917 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8918 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8919 | CPUMCTX_EXTRN_HWVIRT
8920 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8921 AssertRCReturn(rc, rc);
8922
8923 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8924
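    /* Forward the VMCLEAR to IEM for emulation on behalf of the guest hypervisor, packaging the exit
       reason, qualification, instruction info/length and the decoded memory operand; the other VMX
       instruction exit handlers below follow the same pattern. */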
8925 VMXVEXITINFO ExitInfo;
8926 RT_ZERO(ExitInfo);
8927 ExitInfo.uReason = pVmxTransient->uExitReason;
8928 ExitInfo.u64Qual = pVmxTransient->uExitQual;
8929 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
8930 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
8931 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
8932
8933 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
8934 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8935 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
8936 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8937 {
8938 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8939 rcStrict = VINF_SUCCESS;
8940 }
8941 return rcStrict;
8942}
8943
8944
8945/**
8946 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
8947 */
8948HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8949{
8950 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8951
8952 /* Import the entire VMCS state for now since we would be switching VMCS on a successful VMLAUNCH;
8953 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
8954 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8955 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8956 AssertRCReturn(rc, rc);
8957
8958 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8959
8960 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
8961 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
8962 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
8963 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8964 {
8965 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8966 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
8967 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
8968 }
8969 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8970 return rcStrict;
8971}
8972
8973
8974/**
8975 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
8976 */
8977HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8978{
8979 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8980
8981 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8982 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8983 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8984 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8985 | CPUMCTX_EXTRN_HWVIRT
8986 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8987 AssertRCReturn(rc, rc);
8988
8989 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8990
8991 VMXVEXITINFO ExitInfo;
8992 RT_ZERO(ExitInfo);
8993 ExitInfo.uReason = pVmxTransient->uExitReason;
8994 ExitInfo.u64Qual = pVmxTransient->uExitQual;
8995 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
8996 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
8997 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
8998
8999 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9000 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9001 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9002 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9003 {
9004 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9005 rcStrict = VINF_SUCCESS;
9006 }
9007 return rcStrict;
9008}
9009
9010
9011/**
9012 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9013 */
9014HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9015{
9016 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9017
9018 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9019 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9020 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9021 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9022 | CPUMCTX_EXTRN_HWVIRT
9023 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9024 AssertRCReturn(rc, rc);
9025
9026 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9027
9028 VMXVEXITINFO ExitInfo;
9029 RT_ZERO(ExitInfo);
9030 ExitInfo.uReason = pVmxTransient->uExitReason;
9031 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9032 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9033 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9034 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9035
9036 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9037 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9038 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9039 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9040 {
9041 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9042 rcStrict = VINF_SUCCESS;
9043 }
9044 return rcStrict;
9045}
9046
9047
9048/**
9049 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9050 */
9051HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9052{
9053 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9054
9055 /*
9056 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and thus
9057 * might not need to import the shadow VMCS state. It is safer to do so anyway, just in
9058 * case code elsewhere dares look at unsynced VMCS fields.
9059 */
9060 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9061 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9062 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9063 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9064 | CPUMCTX_EXTRN_HWVIRT
9065 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9066 AssertRCReturn(rc, rc);
9067
9068 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9069
9070 VMXVEXITINFO ExitInfo;
9071 RT_ZERO(ExitInfo);
9072 ExitInfo.uReason = pVmxTransient->uExitReason;
9073 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9074 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9075 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9076 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9077 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9078
9079 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9080 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9081 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9082 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9083 {
9084 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9085 rcStrict = VINF_SUCCESS;
9086 }
9087 return rcStrict;
9088}
9089
9090
9091/**
9092 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9093 */
9094HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9095{
9096 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9097
9098 /* Import the entire VMCS state for now since we would be switching VMCS on a successful VMRESUME;
9099 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9100 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9101 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9102 AssertRCReturn(rc, rc);
9103
9104 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9105
9106 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9107 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9108 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9109 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9110 {
9111 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9112 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9113 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9114 }
9115 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9116 return rcStrict;
9117}
9118
9119
9120/**
9121 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9122 */
9123HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9124{
9125 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9126
9127 /*
9128 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, our HM hook gets
9129 * invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and flags
9130 * re-loading of the entire shadow VMCS, so we save the entire shadow VMCS here.
9131 */
9132 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9133 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9134 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9135 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9136 | CPUMCTX_EXTRN_HWVIRT
9137 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9138 AssertRCReturn(rc, rc);
9139
9140 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9141
9142 VMXVEXITINFO ExitInfo;
9143 RT_ZERO(ExitInfo);
9144 ExitInfo.uReason = pVmxTransient->uExitReason;
9145 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9146 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9147 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9148 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9149 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9150
9151 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9152 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9153 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9154 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9155 {
9156 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9157 rcStrict = VINF_SUCCESS;
9158 }
9159 return rcStrict;
9160}
9161
9162
9163/**
9164 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9165 */
9166HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9167{
9168 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9169
9170 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9171 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR4
9172 | CPUMCTX_EXTRN_HWVIRT
9173 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
9174 AssertRCReturn(rc, rc);
9175
9176 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9177
9178 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9179 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9180 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9181 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9182 {
9183 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9184 rcStrict = VINF_SUCCESS;
9185 }
9186 return rcStrict;
9187}
9188
9189
9190/**
9191 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9192 */
9193HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9194{
9195 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9196
9197 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9198 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9199 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9200 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9201 | CPUMCTX_EXTRN_HWVIRT
9202 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9203 AssertRCReturn(rc, rc);
9204
9205 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9206
9207 VMXVEXITINFO ExitInfo;
9208 RT_ZERO(ExitInfo);
9209 ExitInfo.uReason = pVmxTransient->uExitReason;
9210 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9211 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9212 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9213 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9214
9215 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9216 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9217 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9218 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9219 {
9220 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9221 rcStrict = VINF_SUCCESS;
9222 }
9223 return rcStrict;
9224}
9225
9226
9227/**
9228 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9229 */
9230HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9231{
9232 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9233
9234 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9235 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9236 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9237 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9238 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9239 AssertRCReturn(rc, rc);
9240
9241 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9242
9243 VMXVEXITINFO ExitInfo;
9244 RT_ZERO(ExitInfo);
9245 ExitInfo.uReason = pVmxTransient->uExitReason;
9246 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9247 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9248 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9249 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9250
9251 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9252 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9253 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9254 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9255 {
9256 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9257 rcStrict = VINF_SUCCESS;
9258 }
9259 return rcStrict;
9260}
9261
9262
9263# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9264/**
9265 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9266 */
9267HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9268{
9269 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9270
9271 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9272 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9273 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9274 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9275 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9276 AssertRCReturn(rc, rc);
9277
9278 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9279
9280 VMXVEXITINFO ExitInfo;
9281 RT_ZERO(ExitInfo);
9282 ExitInfo.uReason = pVmxTransient->uExitReason;
9283 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9284 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9285 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9286 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9287
9288 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9289 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9290 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9291 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9292 {
9293 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9294 rcStrict = VINF_SUCCESS;
9295 }
9296 return rcStrict;
9297}
9298# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9299#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9300/** @} */
9301
9302
9303#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9304/** @name Nested-guest VM-exit handlers.
9305 * @{
9306 */
9307/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9308/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9309/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9310
9311/**
9312 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9313 * Conditional VM-exit.
9314 */
9315HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9316{
9317 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9318
9319 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
9320
9321 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9322 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9323 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9324
9325 switch (uExitIntType)
9326 {
9327#ifndef IN_NEM_DARWIN
9328 /*
9329 * Physical NMIs:
9330 * We shouldn't direct host physical NMIs to the nested-guest. Dispatch them to the host.
9331 */
9332 case VMX_EXIT_INT_INFO_TYPE_NMI:
9333 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9334#endif
9335
9336 /*
9337 * Hardware exceptions,
9338 * Software exceptions,
9339 * Privileged software exceptions:
9340 * Figure out if the exception must be delivered to the guest or the nested-guest.
9341 */
9342 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9343 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9344 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9345 {
9346 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
9347 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9348 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9349 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9350
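            /* Check whether the nested-guest hypervisor intercepts this exception; for #PF this also
               takes the page-fault error-code mask/match controls into account, not just the
               exception bitmap. */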
9351 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9352 bool const fIntercept = CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo),
9353 pVmxTransient->uExitIntErrorCode);
9354 if (fIntercept)
9355 {
9356 /* Exit qualification is required for debug and page-fault exceptions. */
9357 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9358
9359 /*
9360 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9361 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9362 * length. However, if delivery of a software interrupt, software exception or privileged
9363 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
9364 */
9365 VMXVEXITINFO ExitInfo;
9366 RT_ZERO(ExitInfo);
9367 ExitInfo.uReason = pVmxTransient->uExitReason;
9368 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9369 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9370
9371 VMXVEXITEVENTINFO ExitEventInfo;
9372 RT_ZERO(ExitEventInfo);
9373 ExitEventInfo.uExitIntInfo = pVmxTransient->uExitIntInfo;
9374 ExitEventInfo.uExitIntErrCode = pVmxTransient->uExitIntErrorCode;
9375 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9376 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9377
9378#ifdef DEBUG_ramshankar
9379 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9380 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n", pVmxTransient->uExitIntInfo,
9381 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
9382 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9383 {
9384 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n", pVmxTransient->uIdtVectoringInfo,
9385 pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
9386 }
9387#endif
9388 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
9389 }
9390
9391 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
9392 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9393 return vmxHCExitXcpt(pVCpu, pVmxTransient);
9394 }
9395
9396 /*
9397 * Software interrupts:
9398 * VM-exits cannot be caused by software interrupts.
9399 *
9400 * External interrupts:
9401 * This should only happen when "acknowledge external interrupts on VM-exit"
9402 * control is set. However, we never set this when executing a guest or
9403 * nested-guest. For nested-guests it is emulated while injecting interrupts into
9404 * the guest.
9405 */
9406 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
9407 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
9408 default:
9409 {
9410 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
9411 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
9412 }
9413 }
9414}
9415
9416
9417/**
9418 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
9419 * Unconditional VM-exit.
9420 */
9421HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9422{
9423 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9424 return IEMExecVmxVmexitTripleFault(pVCpu);
9425}
9426
9427
9428/**
9429 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
9430 */
9431HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9432{
9433 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9434
9435 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
9436 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9437 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9438}
9439
9440
9441/**
9442 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
9443 */
9444HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9445{
9446 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9447
9448 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
9449 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9450 return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
9451}
9452
9453
9454/**
9455 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
9456 * Unconditional VM-exit.
9457 */
9458HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9459{
9460 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9461
9462 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9463 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9464 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9465 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9466
9467 VMXVEXITINFO ExitInfo;
9468 RT_ZERO(ExitInfo);
9469 ExitInfo.uReason = pVmxTransient->uExitReason;
9470 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9471 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9472
9473 VMXVEXITEVENTINFO ExitEventInfo;
9474 RT_ZERO(ExitEventInfo);
9475 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9476 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9477 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
9478}
9479
9480
9481/**
9482 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
9483 */
9484HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9485{
9486 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9487
9488 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
9489 {
9490 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9491 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9492 }
9493 return vmxHCExitHlt(pVCpu, pVmxTransient);
9494}
9495
9496
9497/**
9498 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
9499 */
9500HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9501{
9502 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9503
9504 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
9505 {
9506 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9507 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9508
9509 VMXVEXITINFO ExitInfo;
9510 RT_ZERO(ExitInfo);
9511 ExitInfo.uReason = pVmxTransient->uExitReason;
9512 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9513 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9514 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9515 }
9516 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
9517}
9518
9519
9520/**
9521 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
9522 */
9523HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9524{
9525 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9526
9527 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
9528 {
9529 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9530 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9531 }
9532 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
9533}
9534
9535
9536/**
9537 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
9538 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
9539 */
9540HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9541{
9542 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9543
9544 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
9545 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
9546
9547 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9548
9549 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
9550 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9551 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9552
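    /* Outside long mode VMREAD/VMWRITE take a 32-bit register operand, so only the low 32 bits of the
       general-purpose register are used as the VMCS field encoding. */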
9553 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
9554 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
9555 u64VmcsField &= UINT64_C(0xffffffff);
9556
9557 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
9558 {
9559 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9560 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9561
9562 VMXVEXITINFO ExitInfo;
9563 RT_ZERO(ExitInfo);
9564 ExitInfo.uReason = pVmxTransient->uExitReason;
9565 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9566 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9567 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
9568 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9569 }
9570
9571 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
9572 return vmxHCExitVmread(pVCpu, pVmxTransient);
9573 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
9574}
9575
9576
9577/**
9578 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
9579 */
9580HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9581{
9582 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9583
9584 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
9585 {
9586 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9587 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9588 }
9589
9590 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
9591}
9592
9593
9594/**
9595 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
9596 * Conditional VM-exit.
9597 */
9598HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9599{
9600 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9601
9602 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9603 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9604
9605 VBOXSTRICTRC rcStrict;
9606 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
9607 switch (uAccessType)
9608 {
9609 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
9610 {
9611 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9612 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9613 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9614 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9615
9616 bool fIntercept;
9617 switch (iCrReg)
9618 {
9619 case 0:
9620 case 4:
9621 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
9622 break;
9623
9624 case 3:
9625 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
9626 break;
9627
9628 case 8:
9629 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
9630 break;
9631
9632 default:
9633 fIntercept = false;
9634 break;
9635 }
9636 if (fIntercept)
9637 {
9638 VMXVEXITINFO ExitInfo;
9639 RT_ZERO(ExitInfo);
9640 ExitInfo.uReason = pVmxTransient->uExitReason;
9641 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9642 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9643 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9644 }
9645 else
9646 {
9647 int const rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
9648 AssertRCReturn(rc, rc);
9649 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9650 }
9651 break;
9652 }
9653
9654 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
9655 {
9656 /*
9657 * CR0/CR4 reads do not cause VM-exits; the read-shadow is used (subject to masking).
9658 * CR2 reads do not cause a VM-exit.
9659 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
9660 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
9661 */
9662 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9663 if ( iCrReg == 3
9664 || iCrReg == 8)
9665 {
9666 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
9667 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
9668 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
9669 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
9670 {
9671 VMXVEXITINFO ExitInfo;
9672 RT_ZERO(ExitInfo);
9673 ExitInfo.uReason = pVmxTransient->uExitReason;
9674 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9675 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9676 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9677 }
9678 else
9679 {
9680 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9681 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9682 }
9683 }
9684 else
9685 {
9686 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
9687 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
9688 }
9689 break;
9690 }
9691
9692 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
9693 {
9694 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
9695 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
9696 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
9697 if ( (uGstHostMask & X86_CR0_TS)
9698 && (uReadShadow & X86_CR0_TS))
9699 {
9700 VMXVEXITINFO ExitInfo;
9701 RT_ZERO(ExitInfo);
9702 ExitInfo.uReason = pVmxTransient->uExitReason;
9703 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9704 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9705 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9706 }
9707 else
9708 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
9709 break;
9710 }
9711
9712 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
9713 {
9714 RTGCPTR GCPtrEffDst;
9715 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
9716 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
9717 if (fMemOperand)
9718 {
9719 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9720 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
9721 }
9722 else
9723 GCPtrEffDst = NIL_RTGCPTR;
9724
9725 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
9726 {
9727 VMXVEXITINFO ExitInfo;
9728 RT_ZERO(ExitInfo);
9729 ExitInfo.uReason = pVmxTransient->uExitReason;
9730 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9731 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
9732 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9733 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9734 }
9735 else
9736 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
9737 break;
9738 }
9739
9740 default:
9741 {
9742 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
9743 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
9744 }
9745 }
9746
9747 if (rcStrict == VINF_IEM_RAISED_XCPT)
9748 {
9749 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9750 rcStrict = VINF_SUCCESS;
9751 }
9752 return rcStrict;
9753}
9754
9755
9756/**
9757 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
9758 * Conditional VM-exit.
9759 */
9760HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9761{
9762 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9763
9764 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
9765 {
9766 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9767 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9768
9769 VMXVEXITINFO ExitInfo;
9770 RT_ZERO(ExitInfo);
9771 ExitInfo.uReason = pVmxTransient->uExitReason;
9772 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9773 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9774 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9775 }
9776 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
9777}
9778
9779
9780/**
9781 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
9782 * Conditional VM-exit.
9783 */
9784HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9785{
9786 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9787
9788 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9789
9790 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
9791 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
9792 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
9793
9794 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
9795 uint8_t const cbAccess = s_aIOSizes[uIOSize];
9796 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
9797 {
9798 /*
9799 * IN/OUT instruction:
9800 * - Provides VM-exit instruction length.
9801 *
9802 * INS/OUTS instruction:
9803 * - Provides VM-exit instruction length.
9804 * - Provides Guest-linear address.
9805 * - Optionally provides VM-exit instruction info (depends on CPU feature).
9806 */
9807 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9808 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9809
9810 /* Make sure we don't use stale/uninitialized VMX-transient info below. */
9811 pVmxTransient->ExitInstrInfo.u = 0;
9812 pVmxTransient->uGuestLinearAddr = 0;
9813
9814 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
9815 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
9816 if (fIOString)
9817 {
9818 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9819 if (fVmxInsOutsInfo)
9820 {
9821 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
9822 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9823 }
9824 }
9825
9826 VMXVEXITINFO ExitInfo;
9827 RT_ZERO(ExitInfo);
9828 ExitInfo.uReason = pVmxTransient->uExitReason;
9829 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9830 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9831 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
9832 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
9833 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9834 }
9835 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
9836}
9837
9838
9839/**
9840 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
9841 */
9842HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9843{
9844 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9845
9846 uint32_t fMsrpm;
9847 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9848 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9849 else
9850 fMsrpm = VMXMSRPM_EXIT_RD;
9851
9852 if (fMsrpm & VMXMSRPM_EXIT_RD)
9853 {
9854 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9855 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9856 }
9857 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
9858}
9859
9860
9861/**
9862 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
9863 */
9864HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9865{
9866 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9867
9868 uint32_t fMsrpm;
9869 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9870 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9871 else
9872 fMsrpm = VMXMSRPM_EXIT_WR;
9873
9874 if (fMsrpm & VMXMSRPM_EXIT_WR)
9875 {
9876 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9877 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9878 }
9879 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
9880}
9881
9882
9883/**
9884 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
9885 */
9886HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9887{
9888 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9889
9890 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
9891 {
9892 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9893 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9894 }
9895 return vmxHCExitMwait(pVCpu, pVmxTransient);
9896}
9897
9898
9899/**
9900 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
9901 * VM-exit.
9902 */
9903HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9904{
9905 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9906
9907 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
9908 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
9909 VMXVEXITINFO ExitInfo;
9910 RT_ZERO(ExitInfo);
9911 ExitInfo.uReason = pVmxTransient->uExitReason;
9912 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
9913 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
9914}
9915
9916
9917/**
9918 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
9919 */
9920HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9921{
9922 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9923
9924 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
9925 {
9926 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9927 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9928 }
9929 return vmxHCExitMonitor(pVCpu, pVmxTransient);
9930}
9931
9932
9933/**
9934 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
9935 */
9936HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9937{
9938 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9939
9940 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
9941 * PAUSE when executing a nested-guest? If it does not, we would not need
9942 * to check for the intercepts here. Just call VM-exit... */
9943
9944 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
9945 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
9946 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
9947 {
9948 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9949 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9950 }
9951 return vmxHCExitPause(pVCpu, pVmxTransient);
9952}
9953
9954
9955/**
9956 * Nested-guest VM-exit handler for when the TPR value is lowered below the
9957 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
9958 */
9959HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9960{
9961 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9962
9963 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
9964 {
9965 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
9966 VMXVEXITINFO ExitInfo;
9967 RT_ZERO(ExitInfo);
9968 ExitInfo.uReason = pVmxTransient->uExitReason;
9969 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
9970 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
9971 }
9972 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
9973}
9974
9975
9976/**
9977 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
9978 * VM-exit.
9979 */
9980HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9981{
9982 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9983
9984 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9985 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9986 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9987 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9988
9989 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
9990
9991 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
9992 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
9993
9994 VMXVEXITINFO ExitInfo;
9995 RT_ZERO(ExitInfo);
9996 ExitInfo.uReason = pVmxTransient->uExitReason;
9997 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9998 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9999
10000 VMXVEXITEVENTINFO ExitEventInfo;
10001 RT_ZERO(ExitEventInfo);
10002 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10003 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10004 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10005}
10006
10007
10008/**
10009 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10010 * Conditional VM-exit.
10011 */
10012HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10013{
10014 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10015
10016 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10017 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10018 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10019}
10020
10021
10022/**
10023 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10024 * Conditional VM-exit.
10025 */
10026HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10027{
10028 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10029
10030 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10031 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10032 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10033}
10034
10035
10036/**
10037 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10038 */
10039HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10040{
10041 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10042
10043 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10044 {
10045 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10046 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10047 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10048 }
10049 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10050}
10051
10052
10053/**
10054 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10055 */
10056HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10057{
10058 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10059
10060 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10061 {
10062 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10063 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10064 }
10065 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10066}
10067
10068
10069/**
10070 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10071 */
10072HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10073{
10074 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10075
10076 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10077 {
10078 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10079 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10080 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10081 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10082
10083 VMXVEXITINFO ExitInfo;
10084 RT_ZERO(ExitInfo);
10085 ExitInfo.uReason = pVmxTransient->uExitReason;
10086 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10087 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10088 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
10089 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10090 }
10091 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10092}
10093
10094
10095/**
10096 * Nested-guest VM-exit handler for invalid-guest state
10097 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10098 */
10099HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10100{
10101 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10102
10103 /*
10104 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10105 * So if it does happen, it indicates a possible bug in the hardware-assisted VMX code.
10106 * Handle it as if the outer guest were in an invalid guest state.
10107 *
10108 * When the fast path is implemented, this should be changed to cause the corresponding
10109 * nested-guest VM-exit.
10110 */
10111 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10112}
10113
10114
10115/**
10116 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10117 * and only provide the instruction length.
10118 *
10119 * Unconditional VM-exit.
10120 */
10121HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10122{
10123 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10124
10125#ifdef VBOX_STRICT
10126 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10127 switch (pVmxTransient->uExitReason)
10128 {
10129 case VMX_EXIT_ENCLS:
10130 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10131 break;
10132
10133 case VMX_EXIT_VMFUNC:
10134 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10135 break;
10136 }
10137#endif
10138
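 /* Only the instruction length is needed for these VM-exits; read it from the VMCS and let IEM perform the nested-guest VM-exit. */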
10139 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10140 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10141}
10142
10143
10144/**
10145 * Nested-guest VM-exit handler for instructions that provide the instruction length as
10146 * well as additional information.
10147 *
10148 * Unconditional VM-exit.
10149 */
10150HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10151{
10152 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10153
10154#ifdef VBOX_STRICT
10155 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10156 switch (pVmxTransient->uExitReason)
10157 {
10158 case VMX_EXIT_GDTR_IDTR_ACCESS:
10159 case VMX_EXIT_LDTR_TR_ACCESS:
10160 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10161 break;
10162
10163 case VMX_EXIT_RDRAND:
10164 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10165 break;
10166
10167 case VMX_EXIT_RDSEED:
10168 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10169 break;
10170
10171 case VMX_EXIT_XSAVES:
10172 case VMX_EXIT_XRSTORS:
10173 /** @todo NSTVMX: Verify XSS-bitmap. */
10174 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10175 break;
10176
10177 case VMX_EXIT_UMWAIT:
10178 case VMX_EXIT_TPAUSE:
10179 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10180 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10181 break;
10182
10183 case VMX_EXIT_LOADIWKEY:
10184 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10185 break;
10186 }
10187#endif
10188
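 /* These VM-exits also supply an exit qualification and VM-exit instruction information; read all three fields and have IEM perform the nested-guest VM-exit with the full exit information. */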
10189 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10190 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10191 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10192
10193 VMXVEXITINFO ExitInfo;
10194 RT_ZERO(ExitInfo);
10195 ExitInfo.uReason = pVmxTransient->uExitReason;
10196 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10197 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10198 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
10199 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10200}
10201
10202
10203# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10204/**
10205 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10206 * Conditional VM-exit.
10207 */
10208HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10209{
10210 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10211 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10212
10213 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10214 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10215 {
10216 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
10217 AssertRCReturn(rc, rc);
10218
10219 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10220 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10221 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
10222
10223 RTGCPHYS const GCPhysNested = pVmxTransient->uGuestPhysicalAddr;
10224 uint64_t const uExitQual = pVmxTransient->uExitQual;
10225
10226 RTGCPTR GCPtrNested;
10227 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10228 if (fIsLinearAddrValid)
10229 {
10230 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
10231 GCPtrNested = pVmxTransient->uGuestLinearAddr;
10232 }
10233 else
10234 GCPtrNested = 0;
10235
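 /* Translate the EPT-violation exit qualification into x86 #PF-style error-code bits for the PGM nested-paging handler below. */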
10236 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10237 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10238 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10239 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10240 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10241
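 /* Let PGM walk the nested (EPT) page tables. On success the access has been handled here; on failure the EPT violation or EPT misconfiguration VM-exit is reflected to the guest hypervisor below. */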
10242 PGMPTWALK Walk;
10243 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10244 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, CPUMCTX2CORE(pCtx), GCPhysNested,
10245 fIsLinearAddrValid, GCPtrNested, &Walk);
10246 if (RT_SUCCESS(rcStrict))
10247 {
10248 if (rcStrict == VINF_SUCCESS)
10249 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
10250 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10251 {
10252 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10253 rcStrict = VINF_SUCCESS;
10254 }
10255 return rcStrict;
10256 }
10257
10258 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10259 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10260
10261 VMXVEXITEVENTINFO ExitEventInfo;
10262 RT_ZERO(ExitEventInfo);
10263 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10264 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10265
10266 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10267 {
10268 VMXVEXITINFO ExitInfo;
10269 RT_ZERO(ExitInfo);
10270 ExitInfo.uReason = VMX_EXIT_EPT_VIOLATION;
10271 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10272 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10273 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
10274 ExitInfo.u64GuestPhysAddr = pVmxTransient->uGuestPhysicalAddr;
10275 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10276 }
10277
10278 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10279 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10280 }
10281
10282 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10283}
10284
10285
10286/**
10287 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10288 * Conditional VM-exit.
10289 */
10290HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10291{
10292 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10293 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10294
10295 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10296 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10297 {
10298 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
10299 AssertRCReturn(rc, rc);
10300
10301 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
10302
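 /* Probe the nested (EPT) page tables for the faulting guest-physical address. If the walk succeeds we ask the caller to emulate the instruction; otherwise the EPT misconfiguration VM-exit is reflected to the guest hypervisor. */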
10303 PGMPTWALK Walk;
10304 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10305 RTGCPHYS const GCPhysNested = pVmxTransient->uGuestPhysicalAddr;
10306 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, CPUMCTX2CORE(pCtx),
10307 GCPhysNested, false /* fIsLinearAddrValid */,
10308 0 /* GCPtrNested */, &Walk);
10309 if (RT_SUCCESS(rcStrict))
10310 return VINF_EM_RAW_EMULATE_INSTR;
10311
10312 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10313 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10314
10315 VMXVEXITEVENTINFO ExitEventInfo;
10316 RT_ZERO(ExitEventInfo);
10317 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10318 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10319
10320 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10321 }
10322
10323 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
10324}
10325# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10326
10327/** @} */
10328#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10329
10330
10331/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10332 * probes.
10333 *
10334 * The following few functions and associated structure contain the bloat
10335 * necessary for providing detailed debug events and dtrace probes as well as
10336 * reliable host side single stepping. This works on the principle of
10337 * "subclassing" the normal execution loop and workers. We replace the loop
10338 * method completely and override selected helpers to add necessary adjustments
10339 * to their core operation.
10340 *
10341 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10342 * any performance for debug and analysis features.
10343 *
10344 * @{
10345 */
10346
10347/**
10348 * Transient per-VCPU debug state of VMCS and related info that we save/restore in
10349 * the debug run loop.
10350 */
10351typedef struct VMXRUNDBGSTATE
10352{
10353 /** The RIP we started executing at. This is for detecting that we stepped. */
10354 uint64_t uRipStart;
10355 /** The CS we started executing with. */
10356 uint16_t uCsStart;
10357
10358 /** Whether we've actually modified the 1st execution control field. */
10359 bool fModifiedProcCtls : 1;
10360 /** Whether we've actually modified the 2nd execution control field. */
10361 bool fModifiedProcCtls2 : 1;
10362 /** Whether we've actually modified the exception bitmap. */
10363 bool fModifiedXcptBitmap : 1;
10364
10365 /** Whether we want the CR0 guest/host mask to be cleared. */
10366 bool fClearCr0Mask : 1;
10367 /** Whether we want the CR4 guest/host mask to be cleared. */
10368 bool fClearCr4Mask : 1;
10369 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10370 uint32_t fCpe1Extra;
10371 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10372 uint32_t fCpe1Unwanted;
10373 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10374 uint32_t fCpe2Extra;
10375 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10376 uint32_t bmXcptExtra;
10377 /** The sequence number of the Dtrace provider settings the state was
10378 * configured against. */
10379 uint32_t uDtraceSettingsSeqNo;
10380 /** VM-exits to check (one bit per VM-exit). */
10381 uint32_t bmExitsToCheck[3];
10382
10383 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10384 uint32_t fProcCtlsInitial;
10385 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10386 uint32_t fProcCtls2Initial;
10387 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10388 uint32_t bmXcptInitial;
10389} VMXRUNDBGSTATE;
10390AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
10391typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
10392
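/*
 * Rough usage sketch of the debug-state helpers that follow; the actual debug
 * run loop lives elsewhere in this file, and the variable names below are
 * illustrative only:
 *
 *     VMXRUNDBGSTATE DbgState;
 *     vmxHCRunDebugStateInit(pVCpu, pVmxTransient, &DbgState);
 *     vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState);
 *     for (;;)
 *     {
 *         vmxHCPreRunGuestDebugStateApply(pVCpu, pVmxTransient, &DbgState); // right before VM-entry
 *         ... execute the guest and read back the VM-exit reason ...
 *         rcStrict = vmxHCRunDebugHandleExit(pVCpu, pVmxTransient, &DbgState);
 *         if (rcStrict != VINF_SUCCESS)
 *             break;
 *     }
 *     return vmxHCRunDebugStateRevert(pVCpu, pVmxTransient, &DbgState, rcStrict);
 */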
10393
10394/**
10395 * Initializes the VMXRUNDBGSTATE structure.
10396 *
10397 * @param pVCpu The cross context virtual CPU structure of the
10398 * calling EMT.
10399 * @param pVmxTransient The VMX-transient structure.
10400 * @param pDbgState The debug state to initialize.
10401 */
10402static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10403{
10404 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
10405 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
10406
10407 pDbgState->fModifiedProcCtls = false;
10408 pDbgState->fModifiedProcCtls2 = false;
10409 pDbgState->fModifiedXcptBitmap = false;
10410 pDbgState->fClearCr0Mask = false;
10411 pDbgState->fClearCr4Mask = false;
10412 pDbgState->fCpe1Extra = 0;
10413 pDbgState->fCpe1Unwanted = 0;
10414 pDbgState->fCpe2Extra = 0;
10415 pDbgState->bmXcptExtra = 0;
10416 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
10417 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
10418 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
10419}
10420
10421
10422/**
10423 * Updates the VMCS fields with changes requested by @a pDbgState.
10424 *
10425 * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
10426 * immediately before executing guest code, i.e. when interrupts are disabled.
10427 * We don't check status codes here as we cannot easily assert or return in the
10428 * latter case.
10429 *
10430 * @param pVCpu The cross context virtual CPU structure.
10431 * @param pVmxTransient The VMX-transient structure.
10432 * @param pDbgState The debug state.
10433 */
10434static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10435{
10436 /*
10437 * Ensure desired flags in VMCS control fields are set.
10438 * (Ignoring write failure here, as we're committed and it's just debug extras.)
10439 *
10440 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
10441 * there should be no stale data in pCtx at this point.
10442 */
10443 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10444 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
10445 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
10446 {
10447 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
10448 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
10449 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
10450 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
10451 pDbgState->fModifiedProcCtls = true;
10452 }
10453
10454 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
10455 {
10456 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
10457 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
10458 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
10459 pDbgState->fModifiedProcCtls2 = true;
10460 }
10461
10462 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
10463 {
10464 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
10465 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
10466 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
10467 pDbgState->fModifiedXcptBitmap = true;
10468 }
10469
10470 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
10471 {
10472 pVmcsInfo->u64Cr0Mask = 0;
10473 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
10474 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
10475 }
10476
10477 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
10478 {
10479 pVmcsInfo->u64Cr4Mask = 0;
10480 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
10481 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
10482 }
10483
10484 NOREF(pVCpu);
10485}
10486
10487
10488/**
10489 * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
10490 * re-entry next time around.
10491 *
10492 * @returns Strict VBox status code (i.e. informational status codes too).
10493 * @param pVCpu The cross context virtual CPU structure.
10494 * @param pVmxTransient The VMX-transient structure.
10495 * @param pDbgState The debug state.
10496 * @param rcStrict The return code from executing the guest using single
10497 * stepping.
10498 */
10499static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
10500 VBOXSTRICTRC rcStrict)
10501{
10502 /*
10503 * Restore VM-exit control settings as we may not reenter this function the
10504 * next time around.
10505 */
10506 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10507
10508 /* We reload the initial value and trigger what recalculations we can the
10509 next time around. From the looks of things, that's all that's required atm. */
10510 if (pDbgState->fModifiedProcCtls)
10511 {
10512 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
10513 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
10514 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
10515 AssertRC(rc2);
10516 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
10517 }
10518
10519 /* We're currently the only ones messing with this one, so just restore the
10520 cached value and reload the field. */
10521 if ( pDbgState->fModifiedProcCtls2
10522 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
10523 {
10524 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
10525 AssertRC(rc2);
10526 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
10527 }
10528
10529 /* If we've modified the exception bitmap, we restore it and trigger
10530 reloading and partial recalculation the next time around. */
10531 if (pDbgState->fModifiedXcptBitmap)
10532 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
10533
10534 return rcStrict;
10535}
10536
10537
10538/**
10539 * Configures VM-exit controls for current DBGF and DTrace settings.
10540 *
10541 * This updates @a pDbgState and the VMCS execution control fields to reflect
10542 * the necessary VM-exits demanded by DBGF and DTrace.
10543 *
10544 * @param pVCpu The cross context virtual CPU structure.
10545 * @param pVmxTransient The VMX-transient structure. May update
10546 * fUpdatedTscOffsettingAndPreemptTimer.
10547 * @param pDbgState The debug state.
10548 */
10549static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10550{
10551#ifndef IN_NEM_DARWIN
10552 /*
10553 * Take down the dtrace serial number so we can spot changes.
10554 */
10555 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
10556 ASMCompilerBarrier();
10557#endif
10558
10559 /*
10560 * We'll rebuild most of the middle block of data members (holding the
10561 * current settings) as we go along here, so start by clearing it all.
10562 */
10563 pDbgState->bmXcptExtra = 0;
10564 pDbgState->fCpe1Extra = 0;
10565 pDbgState->fCpe1Unwanted = 0;
10566 pDbgState->fCpe2Extra = 0;
10567 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
10568 pDbgState->bmExitsToCheck[i] = 0;
10569
10570 /*
10571 * Software interrupts (INT XXh) - no idea how to trigger these...
10572 */
10573 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10574 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
10575 || VBOXVMM_INT_SOFTWARE_ENABLED())
10576 {
10577 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
10578 }
10579
10580 /*
10581 * INT3 breakpoints - triggered by #BP exceptions.
10582 */
10583 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
10584 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
10585
10586 /*
10587 * Exception bitmap and XCPT events+probes.
10588 */
10589 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
10590 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
10591 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
10592
10593 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
10594 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
10595 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
10596 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
10597 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
10598 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
10599 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
10600 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
10601 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
10602 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
10603 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
10604 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
10605 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
10606 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
10607 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
10608 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
10609 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
10610 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
10611
10612 if (pDbgState->bmXcptExtra)
10613 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
10614
10615 /*
10616 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
10617 *
10618 * Note! This is the reverse of what vmxHCHandleExitDtraceEvents does.
10619 * So, when adding/changing/removing please don't forget to update it.
10620 *
10621 * Some of the macros pick up local variables to save horizontal space
10622 * (being able to see it in a table is the lesser evil here).
10623 */
10624#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
10625 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
10626 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
10627#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
10628 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10629 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10630 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10631 } else do { } while (0)
10632#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
10633 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10634 { \
10635 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
10636 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10637 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10638 } else do { } while (0)
10639#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
10640 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10641 { \
10642 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
10643 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10644 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10645 } else do { } while (0)
10646#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
10647 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10648 { \
10649 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
10650 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10651 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10652 } else do { } while (0)
10653
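 /* The table below maps DBGF/DTrace instruction and exit events onto the VM-exits (and, where needed, the execution controls) required to observe them. */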
10654 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
10655 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
10656 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
10657 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
10658 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
10659
10660 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
10661 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
10662 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
10663 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
10664 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
10665 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
10666 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
10667 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
10668 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
10669 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
10670 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
10671 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
10672 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
10673 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
10674 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
10675 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
10676 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
10677 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
10678 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
10679 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
10680 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
10681 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
10682 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
10683 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
10684 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
10685 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
10686 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
10687 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
10688 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
10689 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
10690 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
10691 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
10692 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
10693 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
10694 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
10695 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
10696
10697 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
10698 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
10699 {
10700 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4
10701 | CPUMCTX_EXTRN_APIC_TPR);
10702 AssertRC(rc);
10703
10704#if 0 /** @todo fix me */
10705 pDbgState->fClearCr0Mask = true;
10706 pDbgState->fClearCr4Mask = true;
10707#endif
10708 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
10709 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
10710 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
10711 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
10712 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
10713 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
10714 require clearing here and in the loop if we start using it. */
10715 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
10716 }
10717 else
10718 {
10719 if (pDbgState->fClearCr0Mask)
10720 {
10721 pDbgState->fClearCr0Mask = false;
10722 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
10723 }
10724 if (pDbgState->fClearCr4Mask)
10725 {
10726 pDbgState->fClearCr4Mask = false;
10727 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
10728 }
10729 }
10730 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
10731 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
10732
10733 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
10734 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
10735 {
10736 /** @todo later, need to fix handler as it assumes this won't usually happen. */
10737 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
10738 }
10739 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
10740 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
10741
10742 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
10743 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
10744 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
10745 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
10746 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
10747 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
10748 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
10749 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
10750#if 0 /** @todo too slow, fix handler. */
10751 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
10752#endif
10753 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
10754
10755 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
10756 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
10757 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
10758 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
10759 {
10760 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
10761 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
10762 }
10763 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10764 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10765 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10766 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10767
10768 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
10769 || IS_EITHER_ENABLED(pVM, INSTR_STR)
10770 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
10771 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
10772 {
10773 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
10774 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
10775 }
10776 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
10777 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
10778 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
10779 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
10780
10781 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
10782 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
10783 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
10784 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
10785 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
10786 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
10787 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
10788 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
10789 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
10790 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
10791 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
10792 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
10793 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
10794 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
10795 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
10796 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
10797 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
10798 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
10799 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
10800 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
10801 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
10802 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
10803
10804#undef IS_EITHER_ENABLED
10805#undef SET_ONLY_XBM_IF_EITHER_EN
10806#undef SET_CPE1_XBM_IF_EITHER_EN
10807#undef SET_CPEU_XBM_IF_EITHER_EN
10808#undef SET_CPE2_XBM_IF_EITHER_EN
10809
10810 /*
10811 * Sanitize the control stuff.
10812 */
10813 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
10814 if (pDbgState->fCpe2Extra)
10815 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
10816 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
10817 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
10818#ifndef IN_NEM_DARWIN
10819 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
10820 {
10821 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
10822 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
10823 }
10824#else
10825 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
10826 {
10827 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
10828 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
10829 }
10830#endif
10831
10832 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
10833 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
10834 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
10835 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
10836}
10837
10838
10839/**
10840 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
10841 * appropriate.
10842 *
10843 * The caller has checked the VM-exit against the
10844 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs
10845 * already, so we don't have to do that either.
10846 *
10847 * @returns Strict VBox status code (i.e. informational status codes too).
10848 * @param pVCpu The cross context virtual CPU structure.
10849 * @param pVmxTransient The VMX-transient structure.
10850 * @param uExitReason The VM-exit reason.
10851 *
10852 * @remarks The name of this function is displayed by dtrace, so keep it short
10853 * and to the point. No longer than 33 chars, please.
10854 */
10855static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
10856{
10857 /*
10858 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
10859 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
10860 *
10861 * Note! This is the reverse operation of what vmxHCPreRunGuestDebugStateUpdate
10862 * does. Must add/change/remove in both places. Same ordering, please.
10863 *
10864 * Added/removed events must also be reflected in the next section
10865 * where we dispatch dtrace events.
10866 */
10867 bool fDtrace1 = false;
10868 bool fDtrace2 = false;
10869 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
10870 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
10871 uint32_t uEventArg = 0;
10872#define SET_EXIT(a_EventSubName) \
10873 do { \
10874 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
10875 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
10876 } while (0)
10877#define SET_BOTH(a_EventSubName) \
10878 do { \
10879 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
10880 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
10881 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
10882 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
10883 } while (0)
10884 switch (uExitReason)
10885 {
10886 case VMX_EXIT_MTF:
10887 return vmxHCExitMtf(pVCpu, pVmxTransient);
10888
10889 case VMX_EXIT_XCPT_OR_NMI:
10890 {
10891 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
10892 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
10893 {
10894 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
10895 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
10896 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
10897 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
10898 {
10899 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
10900 {
10901 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
10902 uEventArg = pVmxTransient->uExitIntErrorCode;
10903 }
10904 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
10905 switch (enmEvent1)
10906 {
10907 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
10908 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
10909 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
10910 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
10911 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
10912 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
10913 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
10914 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
10915 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
10916 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
10917 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
10918 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
10919 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
10920 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
10921 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
10922 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
10923 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
10924 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
10925 default: break;
10926 }
10927 }
10928 else
10929 AssertFailed();
10930 break;
10931
10932 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10933 uEventArg = idxVector;
10934 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
10935 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
10936 break;
10937 }
10938 break;
10939 }
10940
10941 case VMX_EXIT_TRIPLE_FAULT:
10942 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
10943 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
10944 break;
10945 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
10946 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
10947 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
10948 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
10949 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
10950
10951 /* Instruction specific VM-exits: */
10952 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
10953 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
10954 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
10955 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
10956 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
10957 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
10958 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
10959 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
10960 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
10961 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
10962 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
10963 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
10964 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
10965 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
10966 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
10967 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
10968 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
10969 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
10970 case VMX_EXIT_MOV_CRX:
10971 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10972 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
10973 SET_BOTH(CRX_READ);
10974 else
10975 SET_BOTH(CRX_WRITE);
10976 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10977 break;
10978 case VMX_EXIT_MOV_DRX:
10979 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10980 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
10981 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
10982 SET_BOTH(DRX_READ);
10983 else
10984 SET_BOTH(DRX_WRITE);
10985 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
10986 break;
10987 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
10988 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
10989 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
10990 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
10991 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
10992 case VMX_EXIT_GDTR_IDTR_ACCESS:
10993 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10994 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
10995 {
10996 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
10997 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
10998 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
10999 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11000 }
11001 break;
11002
11003 case VMX_EXIT_LDTR_TR_ACCESS:
11004 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
11005 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11006 {
11007 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11008 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11009 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11010 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11011 }
11012 break;
11013
11014 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11015 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11016 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11017 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11018 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11019 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11020 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11021 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11022 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11023 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11024 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11025
11026 /* Events that aren't relevant at this point. */
11027 case VMX_EXIT_EXT_INT:
11028 case VMX_EXIT_INT_WINDOW:
11029 case VMX_EXIT_NMI_WINDOW:
11030 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11031 case VMX_EXIT_PREEMPT_TIMER:
11032 case VMX_EXIT_IO_INSTR:
11033 break;
11034
11035 /* Errors and unexpected events. */
11036 case VMX_EXIT_INIT_SIGNAL:
11037 case VMX_EXIT_SIPI:
11038 case VMX_EXIT_IO_SMI:
11039 case VMX_EXIT_SMI:
11040 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11041 case VMX_EXIT_ERR_MSR_LOAD:
11042 case VMX_EXIT_ERR_MACHINE_CHECK:
11043 case VMX_EXIT_PML_FULL:
11044 case VMX_EXIT_VIRTUALIZED_EOI:
11045 break;
11046
11047 default:
11048 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11049 break;
11050 }
11051#undef SET_BOTH
11052#undef SET_EXIT
11053
11054 /*
11055 * Dtrace tracepoints go first. We do them all here at once so we don't
11056 * have to duplicate the guest-state saving and related code a few dozen times.
11057 * The downside is that we've got to repeat the switch, though this time
11058 * we use enmEvent since the probes are a subset of what DBGF does.
11059 */
11060 if (fDtrace1 || fDtrace2)
11061 {
11062 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11063 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11064 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11065 switch (enmEvent1)
11066 {
11067 /** @todo consider which extra parameters would be helpful for each probe. */
11068 case DBGFEVENT_END: break;
11069 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11070 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11071 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11072 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11073 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11074 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11075 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11076 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11077 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11078 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11079 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11080 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11081 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11082 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11083 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11084 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11085 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11086 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11087 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11088 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11089 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11090 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11091 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11092 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11093 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11094 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11095 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11096 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11097 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11098 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11099 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11100 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11101 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11102 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11103 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11104 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11105 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11106 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11107 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11108 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11109 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11110 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11111 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11112 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11113 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11114 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11115 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11116 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11117 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11118 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11119 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11120 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11121 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11122 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11123 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11124 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11125 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11126 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11127 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11128 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11129 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11130 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11131 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11132 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11133 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11134 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11135 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11136 }
11137 switch (enmEvent2)
11138 {
11139 /** @todo consider which extra parameters would be helpful for each probe. */
11140 case DBGFEVENT_END: break;
11141 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11142 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11143 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11144 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11145 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11146 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11147 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11148 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11149 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11150 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11151 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11152 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11153 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11154 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11155 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11156 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11157 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11158 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11159 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11160 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11161 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11162 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11163 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11164 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11165 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11166 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11167 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11168 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11169 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11170 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11171 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11172 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11173 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11174 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11175 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11176 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11177 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11178 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11179 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11180 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11181 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11182 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11183 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11184 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11185 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11186 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11187 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11188 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11189 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11190 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11191 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11192 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11193 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11194 }
11195 }
11196
11197 /*
11198 * Fire off the DBGF event, if enabled (our check here is just a quick one,
11199 * the DBGF call will do a full check).
11200 *
11201 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11202 * Note! If we have two events, we prioritize the first, i.e. the instruction
11203 * one, in order to avoid event nesting.
11204 */
11205 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11206 if ( enmEvent1 != DBGFEVENT_END
11207 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11208 {
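/* Bring CS and RIP up to date before raising the event so the debugger sees the current guest location. */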
11209 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11210 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11211 if (rcStrict != VINF_SUCCESS)
11212 return rcStrict;
11213 }
11214 else if ( enmEvent2 != DBGFEVENT_END
11215 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11216 {
11217 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11218 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11219 if (rcStrict != VINF_SUCCESS)
11220 return rcStrict;
11221 }
11222
11223 return VINF_SUCCESS;
11224}
11225
11226
11227/**
11228 * Single-stepping VM-exit filtering.
11229 *
11230 * This preprocesses the VM-exits and decides whether we've gotten far
11231 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11232 * handling is performed.
11233 *
11234 * @returns Strict VBox status code (i.e. informational status codes too).
11235 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11236 * @param pVmxTransient The VMX-transient structure.
11237 * @param pDbgState The debug state.
11238 */
11239DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11240{
11241 /*
11242 * Expensive (imports the full guest context) generic dtrace VM-exit probe.
11243 */
11244 uint32_t const uExitReason = pVmxTransient->uExitReason;
11245 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11246 { /* more likely */ }
11247 else
11248 {
11249 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11250 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11251 AssertRC(rc);
11252 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11253 }
11254
11255#ifndef IN_NEM_DARWIN
11256 /*
11257 * Check for host NMI, just to get that out of the way.
11258 */
11259 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11260 { /* normally likely */ }
11261 else
11262 {
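/* A host NMI (as opposed to a guest exception) is handed straight to the host NMI handler rather than injected into the guest. */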
11263 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
11264 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11265 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11266 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11267 }
11268#endif
11269
11270 /*
11271 * Check for single stepping event if we're stepping.
11272 */
11273 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11274 {
11275 switch (uExitReason)
11276 {
11277 case VMX_EXIT_MTF:
11278 return vmxHCExitMtf(pVCpu, pVmxTransient);
11279
11280 /* Various events: */
11281 case VMX_EXIT_XCPT_OR_NMI:
11282 case VMX_EXIT_EXT_INT:
11283 case VMX_EXIT_TRIPLE_FAULT:
11284 case VMX_EXIT_INT_WINDOW:
11285 case VMX_EXIT_NMI_WINDOW:
11286 case VMX_EXIT_TASK_SWITCH:
11287 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11288 case VMX_EXIT_APIC_ACCESS:
11289 case VMX_EXIT_EPT_VIOLATION:
11290 case VMX_EXIT_EPT_MISCONFIG:
11291 case VMX_EXIT_PREEMPT_TIMER:
11292
11293 /* Instruction specific VM-exits: */
11294 case VMX_EXIT_CPUID:
11295 case VMX_EXIT_GETSEC:
11296 case VMX_EXIT_HLT:
11297 case VMX_EXIT_INVD:
11298 case VMX_EXIT_INVLPG:
11299 case VMX_EXIT_RDPMC:
11300 case VMX_EXIT_RDTSC:
11301 case VMX_EXIT_RSM:
11302 case VMX_EXIT_VMCALL:
11303 case VMX_EXIT_VMCLEAR:
11304 case VMX_EXIT_VMLAUNCH:
11305 case VMX_EXIT_VMPTRLD:
11306 case VMX_EXIT_VMPTRST:
11307 case VMX_EXIT_VMREAD:
11308 case VMX_EXIT_VMRESUME:
11309 case VMX_EXIT_VMWRITE:
11310 case VMX_EXIT_VMXOFF:
11311 case VMX_EXIT_VMXON:
11312 case VMX_EXIT_MOV_CRX:
11313 case VMX_EXIT_MOV_DRX:
11314 case VMX_EXIT_IO_INSTR:
11315 case VMX_EXIT_RDMSR:
11316 case VMX_EXIT_WRMSR:
11317 case VMX_EXIT_MWAIT:
11318 case VMX_EXIT_MONITOR:
11319 case VMX_EXIT_PAUSE:
11320 case VMX_EXIT_GDTR_IDTR_ACCESS:
11321 case VMX_EXIT_LDTR_TR_ACCESS:
11322 case VMX_EXIT_INVEPT:
11323 case VMX_EXIT_RDTSCP:
11324 case VMX_EXIT_INVVPID:
11325 case VMX_EXIT_WBINVD:
11326 case VMX_EXIT_XSETBV:
11327 case VMX_EXIT_RDRAND:
11328 case VMX_EXIT_INVPCID:
11329 case VMX_EXIT_VMFUNC:
11330 case VMX_EXIT_RDSEED:
11331 case VMX_EXIT_XSAVES:
11332 case VMX_EXIT_XRSTORS:
11333 {
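/* Single-step detection: if CS:RIP has moved away from where the step started, report VINF_EM_DBG_STEPPED. */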
11334 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11335 AssertRCReturn(rc, rc);
11336 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11337 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11338 return VINF_EM_DBG_STEPPED;
11339 break;
11340 }
11341
11342 /* Errors and unexpected events: */
11343 case VMX_EXIT_INIT_SIGNAL:
11344 case VMX_EXIT_SIPI:
11345 case VMX_EXIT_IO_SMI:
11346 case VMX_EXIT_SMI:
11347 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11348 case VMX_EXIT_ERR_MSR_LOAD:
11349 case VMX_EXIT_ERR_MACHINE_CHECK:
11350 case VMX_EXIT_PML_FULL:
11351 case VMX_EXIT_VIRTUALIZED_EOI:
11352 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault-like, so I guess we must process it? */
11353 break;
11354
11355 default:
11356 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11357 break;
11358 }
11359 }
11360
11361 /*
11362 * Check for debugger event breakpoints and dtrace probes.
11363 */
11364 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11365 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11366 {
11367 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11368 if (rcStrict != VINF_SUCCESS)
11369 return rcStrict;
11370 }
11371
11372 /*
11373 * Normal processing.
11374 */
11375#ifdef HMVMX_USE_FUNCTION_TABLE
11376 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11377#else
11378 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11379#endif
11380}
11381
11382/** @} */