VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@ 94617

Last change on this file since 94617 was 93963, checked in by vboxsync on 2022-02-28

VMM: Nested VMX: bugref:10092 Add HM ring-0 API for querying transient VMX/SVM info.

1/* $Id: VMXAllTemplate.cpp.h 93963 2022-02-28 08:39:08Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Defined Constants And Macros *
21*********************************************************************************************************************************/
22#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
23# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW macros is missing"
24#endif
25
26
27#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
28# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW macros is missing"
29#endif
30
31
32/** Use the function table. */
33#define HMVMX_USE_FUNCTION_TABLE
34
35/** Determine which tagged-TLB flush handler to use. */
36#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
37#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
38#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
39#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
40
41/** Assert that all the given fields have been read from the VMCS. */
42#ifdef VBOX_STRICT
43# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
44 do { \
45 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
46 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
47 } while (0)
48#else
49# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
50#endif
51
52/**
53 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
54 * guest using hardware-assisted VMX.
55 *
56 * This excludes state like GPRs (other than RSP) which are always
57 * swapped and restored across the world-switch, and also registers like the EFER
58 * MSR which cannot be modified by the guest without causing a VM-exit.
59 */
60#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
61 | CPUMCTX_EXTRN_RFLAGS \
62 | CPUMCTX_EXTRN_RSP \
63 | CPUMCTX_EXTRN_SREG_MASK \
64 | CPUMCTX_EXTRN_TABLE_MASK \
65 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
66 | CPUMCTX_EXTRN_SYSCALL_MSRS \
67 | CPUMCTX_EXTRN_SYSENTER_MSRS \
68 | CPUMCTX_EXTRN_TSC_AUX \
69 | CPUMCTX_EXTRN_OTHER_MSRS \
70 | CPUMCTX_EXTRN_CR0 \
71 | CPUMCTX_EXTRN_CR3 \
72 | CPUMCTX_EXTRN_CR4 \
73 | CPUMCTX_EXTRN_DR7 \
74 | CPUMCTX_EXTRN_HWVIRT \
75 | CPUMCTX_EXTRN_INHIBIT_INT \
76 | CPUMCTX_EXTRN_INHIBIT_NMI)
77
78/**
79 * Exception bitmap mask for real-mode guests (real-on-v86).
80 *
81 * We need to intercept all exceptions manually except:
82 * - \#AC and \#DB, which are always intercepted anyway to prevent the CPU from
83 * deadlocking due to bugs in Intel CPUs.
84 * - \#PF, which need not be intercepted even in real-mode if we have nested
85 * paging support.
86 */
87#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
88 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
89 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
90 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
91 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
92 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
93 | RT_BIT(X86_XCPT_XF))
94
95/** Maximum VM-instruction error number. */
96#define HMVMX_INSTR_ERROR_MAX 28
97
98/** Profiling macro. */
99#ifdef HM_PROFILE_EXIT_DISPATCH
100# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
101# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
102#else
103# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
104# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
105#endif
106
107#ifndef IN_NEM_DARWIN
108/** Assert that preemption is disabled or covered by thread-context hooks. */
109# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
110 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
111
112/** Assert that we haven't migrated CPUs when thread-context hooks are not
113 * used. */
114# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
115 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
116 ("Illegal migration! Entered on CPU %u Current %u\n", \
117 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
118#else
119# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
120# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
121#endif
122
123/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
124 * context. */
125#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
126 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
127 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
128
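#if 0
/*
 * Minimal usage sketch (kept out of the build): import only the guest state a
 * piece of code needs and assert it is no longer marked external before it is
 * consumed.  vmxHCImportGuestState is prototyped further down; the
 * vmxHCExampleImportRip name is purely illustrative.
 */
static void vmxHCExampleImportRip(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
{
    int const rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
    AssertRC(rc);
    HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
    Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
}
#endif
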
129/** Log the VM-exit reason with an easily visible marker to identify it in a
130 * potential sea of logging data. */
131#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
132 do { \
133 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
134 HMGetVmxExitName(a_uExitReason))); \
135 } while (0)
136
137
138/*********************************************************************************************************************************
139* Structures and Typedefs *
140*********************************************************************************************************************************/
141/**
142 * Memory operand read or write access.
143 */
144typedef enum VMXMEMACCESS
145{
146 VMXMEMACCESS_READ = 0,
147 VMXMEMACCESS_WRITE = 1
148} VMXMEMACCESS;
149
150
151/**
152 * VMX VM-exit handler.
153 *
154 * @returns Strict VBox status code (i.e. informational status codes too).
155 * @param pVCpu The cross context virtual CPU structure.
156 * @param pVmxTransient The VMX-transient structure.
157 */
158#ifndef HMVMX_USE_FUNCTION_TABLE
159typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
160#else
161typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
162/** Pointer to VM-exit handler. */
163typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
164#endif
165
166/**
167 * VMX VM-exit handler, non-strict status code.
168 *
169 * This is generally the same as FNVMXEXITHANDLER; the NSRC bit is just FYI.
170 *
171 * @returns VBox status code, no informational status code returned.
172 * @param pVCpu The cross context virtual CPU structure.
173 * @param pVmxTransient The VMX-transient structure.
174 *
175 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
176 * use of that status code will be replaced with VINF_EM_SOMETHING
177 * later when switching over to IEM.
178 */
179#ifndef HMVMX_USE_FUNCTION_TABLE
180typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
181#else
182typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
183#endif
184
185
186/*********************************************************************************************************************************
187* Internal Functions *
188*********************************************************************************************************************************/
189#ifndef HMVMX_USE_FUNCTION_TABLE
190DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
191# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
192# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
193#else
194# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
195# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
196#endif
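
#if 0
/*
 * Sketch (kept out of the build) of the shape every vmxHCExitXxx handler below
 * follows: lazily read the transient VMCS fields it needs, handle the exit and
 * return a strict status code.  The vmxHCExitExampleSkeleton name is purely
 * illustrative.
 */
HMVMX_EXIT_DECL vmxHCExitExampleSkeleton(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);    /* Defined further down. */
    /* ... emulate the instruction or forward the event to the guest ... */
    return VINF_SUCCESS;
}
#endif
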
197#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
198DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
199#endif
200
201static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
202
203/** @name VM-exit handler prototypes.
204 * @{
205 */
206static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
207static FNVMXEXITHANDLER vmxHCExitExtInt;
208static FNVMXEXITHANDLER vmxHCExitTripleFault;
209static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
210static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
211static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
212static FNVMXEXITHANDLER vmxHCExitCpuid;
213static FNVMXEXITHANDLER vmxHCExitGetsec;
214static FNVMXEXITHANDLER vmxHCExitHlt;
215static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
216static FNVMXEXITHANDLER vmxHCExitInvlpg;
217static FNVMXEXITHANDLER vmxHCExitRdpmc;
218static FNVMXEXITHANDLER vmxHCExitVmcall;
219#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
220static FNVMXEXITHANDLER vmxHCExitVmclear;
221static FNVMXEXITHANDLER vmxHCExitVmlaunch;
222static FNVMXEXITHANDLER vmxHCExitVmptrld;
223static FNVMXEXITHANDLER vmxHCExitVmptrst;
224static FNVMXEXITHANDLER vmxHCExitVmread;
225static FNVMXEXITHANDLER vmxHCExitVmresume;
226static FNVMXEXITHANDLER vmxHCExitVmwrite;
227static FNVMXEXITHANDLER vmxHCExitVmxoff;
228static FNVMXEXITHANDLER vmxHCExitVmxon;
229static FNVMXEXITHANDLER vmxHCExitInvvpid;
230# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
231static FNVMXEXITHANDLER vmxHCExitInvept;
232# endif
233#endif
234static FNVMXEXITHANDLER vmxHCExitRdtsc;
235static FNVMXEXITHANDLER vmxHCExitMovCRx;
236static FNVMXEXITHANDLER vmxHCExitMovDRx;
237static FNVMXEXITHANDLER vmxHCExitIoInstr;
238static FNVMXEXITHANDLER vmxHCExitRdmsr;
239static FNVMXEXITHANDLER vmxHCExitWrmsr;
240static FNVMXEXITHANDLER vmxHCExitMwait;
241static FNVMXEXITHANDLER vmxHCExitMtf;
242static FNVMXEXITHANDLER vmxHCExitMonitor;
243static FNVMXEXITHANDLER vmxHCExitPause;
244static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
245static FNVMXEXITHANDLER vmxHCExitApicAccess;
246static FNVMXEXITHANDLER vmxHCExitEptViolation;
247static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
248static FNVMXEXITHANDLER vmxHCExitRdtscp;
249static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
250static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
251static FNVMXEXITHANDLER vmxHCExitXsetbv;
252static FNVMXEXITHANDLER vmxHCExitInvpcid;
253static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
254static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
255static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
256/** @} */
257
258#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
259/** @name Nested-guest VM-exit handler prototypes.
260 * @{
261 */
262static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
263static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
264static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
265static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
266static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
267static FNVMXEXITHANDLER vmxHCExitHltNested;
268static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
269static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
270static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
271static FNVMXEXITHANDLER vmxHCExitRdtscNested;
272static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
273static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
274static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
275static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
276static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
277static FNVMXEXITHANDLER vmxHCExitMwaitNested;
278static FNVMXEXITHANDLER vmxHCExitMtfNested;
279static FNVMXEXITHANDLER vmxHCExitMonitorNested;
280static FNVMXEXITHANDLER vmxHCExitPauseNested;
281static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
282static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
283static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
284static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
285static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
286static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
287static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
288static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
289static FNVMXEXITHANDLER vmxHCExitInstrNested;
290static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
291# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
292static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
293static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
294# endif
295/** @} */
296#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
297
298
299/*********************************************************************************************************************************
300* Global Variables *
301*********************************************************************************************************************************/
302#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
303/**
304 * Array of all VMCS fields.
305 * Any fields added to the VT-x spec. should be added here.
306 *
307 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
308 * of nested-guests.
309 */
310static const uint32_t g_aVmcsFields[] =
311{
312 /* 16-bit control fields. */
313 VMX_VMCS16_VPID,
314 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
315 VMX_VMCS16_EPTP_INDEX,
316
317 /* 16-bit guest-state fields. */
318 VMX_VMCS16_GUEST_ES_SEL,
319 VMX_VMCS16_GUEST_CS_SEL,
320 VMX_VMCS16_GUEST_SS_SEL,
321 VMX_VMCS16_GUEST_DS_SEL,
322 VMX_VMCS16_GUEST_FS_SEL,
323 VMX_VMCS16_GUEST_GS_SEL,
324 VMX_VMCS16_GUEST_LDTR_SEL,
325 VMX_VMCS16_GUEST_TR_SEL,
326 VMX_VMCS16_GUEST_INTR_STATUS,
327 VMX_VMCS16_GUEST_PML_INDEX,
328
329 /* 16-bit host-state fields. */
330 VMX_VMCS16_HOST_ES_SEL,
331 VMX_VMCS16_HOST_CS_SEL,
332 VMX_VMCS16_HOST_SS_SEL,
333 VMX_VMCS16_HOST_DS_SEL,
334 VMX_VMCS16_HOST_FS_SEL,
335 VMX_VMCS16_HOST_GS_SEL,
336 VMX_VMCS16_HOST_TR_SEL,
337
338 /* 64-bit control fields. */
339 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
340 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
341 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
342 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
343 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
344 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
345 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
346 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
347 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
348 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
349 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
350 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
351 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
352 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
353 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
354 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
355 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
356 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
357 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
358 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
359 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
360 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
361 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
362 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
363 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
364 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
365 VMX_VMCS64_CTRL_EPTP_FULL,
366 VMX_VMCS64_CTRL_EPTP_HIGH,
367 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
368 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
369 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
370 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
371 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
372 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
373 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
374 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
375 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
376 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
377 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
378 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
379 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
380 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
381 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
382 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
383 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
384 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
385 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
386 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
387 VMX_VMCS64_CTRL_SPPTP_FULL,
388 VMX_VMCS64_CTRL_SPPTP_HIGH,
389 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
390 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
391 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
392 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
393 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
394 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
395
396 /* 64-bit read-only data fields. */
397 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
398 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
399
400 /* 64-bit guest-state fields. */
401 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
402 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
403 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
404 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
405 VMX_VMCS64_GUEST_PAT_FULL,
406 VMX_VMCS64_GUEST_PAT_HIGH,
407 VMX_VMCS64_GUEST_EFER_FULL,
408 VMX_VMCS64_GUEST_EFER_HIGH,
409 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
410 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
411 VMX_VMCS64_GUEST_PDPTE0_FULL,
412 VMX_VMCS64_GUEST_PDPTE0_HIGH,
413 VMX_VMCS64_GUEST_PDPTE1_FULL,
414 VMX_VMCS64_GUEST_PDPTE1_HIGH,
415 VMX_VMCS64_GUEST_PDPTE2_FULL,
416 VMX_VMCS64_GUEST_PDPTE2_HIGH,
417 VMX_VMCS64_GUEST_PDPTE3_FULL,
418 VMX_VMCS64_GUEST_PDPTE3_HIGH,
419 VMX_VMCS64_GUEST_BNDCFGS_FULL,
420 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
421 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
422 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
423 VMX_VMCS64_GUEST_PKRS_FULL,
424 VMX_VMCS64_GUEST_PKRS_HIGH,
425
426 /* 64-bit host-state fields. */
427 VMX_VMCS64_HOST_PAT_FULL,
428 VMX_VMCS64_HOST_PAT_HIGH,
429 VMX_VMCS64_HOST_EFER_FULL,
430 VMX_VMCS64_HOST_EFER_HIGH,
431 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
432 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
433 VMX_VMCS64_HOST_PKRS_FULL,
434 VMX_VMCS64_HOST_PKRS_HIGH,
435
436 /* 32-bit control fields. */
437 VMX_VMCS32_CTRL_PIN_EXEC,
438 VMX_VMCS32_CTRL_PROC_EXEC,
439 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
440 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
441 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
442 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
443 VMX_VMCS32_CTRL_EXIT,
444 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
445 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
446 VMX_VMCS32_CTRL_ENTRY,
447 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
448 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
449 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
450 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
451 VMX_VMCS32_CTRL_TPR_THRESHOLD,
452 VMX_VMCS32_CTRL_PROC_EXEC2,
453 VMX_VMCS32_CTRL_PLE_GAP,
454 VMX_VMCS32_CTRL_PLE_WINDOW,
455
456 /* 32-bit read-only data fields. */
457 VMX_VMCS32_RO_VM_INSTR_ERROR,
458 VMX_VMCS32_RO_EXIT_REASON,
459 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
460 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
461 VMX_VMCS32_RO_IDT_VECTORING_INFO,
462 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
463 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
464 VMX_VMCS32_RO_EXIT_INSTR_INFO,
465
466 /* 32-bit guest-state fields. */
467 VMX_VMCS32_GUEST_ES_LIMIT,
468 VMX_VMCS32_GUEST_CS_LIMIT,
469 VMX_VMCS32_GUEST_SS_LIMIT,
470 VMX_VMCS32_GUEST_DS_LIMIT,
471 VMX_VMCS32_GUEST_FS_LIMIT,
472 VMX_VMCS32_GUEST_GS_LIMIT,
473 VMX_VMCS32_GUEST_LDTR_LIMIT,
474 VMX_VMCS32_GUEST_TR_LIMIT,
475 VMX_VMCS32_GUEST_GDTR_LIMIT,
476 VMX_VMCS32_GUEST_IDTR_LIMIT,
477 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
478 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
479 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
480 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
481 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
482 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
483 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
484 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
485 VMX_VMCS32_GUEST_INT_STATE,
486 VMX_VMCS32_GUEST_ACTIVITY_STATE,
487 VMX_VMCS32_GUEST_SMBASE,
488 VMX_VMCS32_GUEST_SYSENTER_CS,
489 VMX_VMCS32_PREEMPT_TIMER_VALUE,
490
491 /* 32-bit host-state fields. */
492 VMX_VMCS32_HOST_SYSENTER_CS,
493
494 /* Natural-width control fields. */
495 VMX_VMCS_CTRL_CR0_MASK,
496 VMX_VMCS_CTRL_CR4_MASK,
497 VMX_VMCS_CTRL_CR0_READ_SHADOW,
498 VMX_VMCS_CTRL_CR4_READ_SHADOW,
499 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
500 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
501 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
502 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
503
504 /* Natural-width read-only data fields. */
505 VMX_VMCS_RO_EXIT_QUALIFICATION,
506 VMX_VMCS_RO_IO_RCX,
507 VMX_VMCS_RO_IO_RSI,
508 VMX_VMCS_RO_IO_RDI,
509 VMX_VMCS_RO_IO_RIP,
510 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
511
512 /* Natural-width guest-state fields. */
513 VMX_VMCS_GUEST_CR0,
514 VMX_VMCS_GUEST_CR3,
515 VMX_VMCS_GUEST_CR4,
516 VMX_VMCS_GUEST_ES_BASE,
517 VMX_VMCS_GUEST_CS_BASE,
518 VMX_VMCS_GUEST_SS_BASE,
519 VMX_VMCS_GUEST_DS_BASE,
520 VMX_VMCS_GUEST_FS_BASE,
521 VMX_VMCS_GUEST_GS_BASE,
522 VMX_VMCS_GUEST_LDTR_BASE,
523 VMX_VMCS_GUEST_TR_BASE,
524 VMX_VMCS_GUEST_GDTR_BASE,
525 VMX_VMCS_GUEST_IDTR_BASE,
526 VMX_VMCS_GUEST_DR7,
527 VMX_VMCS_GUEST_RSP,
528 VMX_VMCS_GUEST_RIP,
529 VMX_VMCS_GUEST_RFLAGS,
530 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
531 VMX_VMCS_GUEST_SYSENTER_ESP,
532 VMX_VMCS_GUEST_SYSENTER_EIP,
533 VMX_VMCS_GUEST_S_CET,
534 VMX_VMCS_GUEST_SSP,
535 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
536
537 /* Natural-width host-state fields. */
538 VMX_VMCS_HOST_CR0,
539 VMX_VMCS_HOST_CR3,
540 VMX_VMCS_HOST_CR4,
541 VMX_VMCS_HOST_FS_BASE,
542 VMX_VMCS_HOST_GS_BASE,
543 VMX_VMCS_HOST_TR_BASE,
544 VMX_VMCS_HOST_GDTR_BASE,
545 VMX_VMCS_HOST_IDTR_BASE,
546 VMX_VMCS_HOST_SYSENTER_ESP,
547 VMX_VMCS_HOST_SYSENTER_EIP,
548 VMX_VMCS_HOST_RSP,
549 VMX_VMCS_HOST_RIP,
550 VMX_VMCS_HOST_S_CET,
551 VMX_VMCS_HOST_SSP,
552 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
553};
554#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
555
556#ifdef VBOX_STRICT
557static const uint32_t g_aVmcsSegBase[] =
558{
559 VMX_VMCS_GUEST_ES_BASE,
560 VMX_VMCS_GUEST_CS_BASE,
561 VMX_VMCS_GUEST_SS_BASE,
562 VMX_VMCS_GUEST_DS_BASE,
563 VMX_VMCS_GUEST_FS_BASE,
564 VMX_VMCS_GUEST_GS_BASE
565};
566static const uint32_t g_aVmcsSegSel[] =
567{
568 VMX_VMCS16_GUEST_ES_SEL,
569 VMX_VMCS16_GUEST_CS_SEL,
570 VMX_VMCS16_GUEST_SS_SEL,
571 VMX_VMCS16_GUEST_DS_SEL,
572 VMX_VMCS16_GUEST_FS_SEL,
573 VMX_VMCS16_GUEST_GS_SEL
574};
575static const uint32_t g_aVmcsSegLimit[] =
576{
577 VMX_VMCS32_GUEST_ES_LIMIT,
578 VMX_VMCS32_GUEST_CS_LIMIT,
579 VMX_VMCS32_GUEST_SS_LIMIT,
580 VMX_VMCS32_GUEST_DS_LIMIT,
581 VMX_VMCS32_GUEST_FS_LIMIT,
582 VMX_VMCS32_GUEST_GS_LIMIT
583};
584static const uint32_t g_aVmcsSegAttr[] =
585{
586 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
587 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
588 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
589 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
590 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
591 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS
592};
593AssertCompile(RT_ELEMENTS(g_aVmcsSegSel) == X86_SREG_COUNT);
594AssertCompile(RT_ELEMENTS(g_aVmcsSegLimit) == X86_SREG_COUNT);
595AssertCompile(RT_ELEMENTS(g_aVmcsSegBase) == X86_SREG_COUNT);
596AssertCompile(RT_ELEMENTS(g_aVmcsSegAttr) == X86_SREG_COUNT);
597#endif /* VBOX_STRICT */
598
599#ifdef HMVMX_USE_FUNCTION_TABLE
600/**
601 * VMX_EXIT dispatch table.
602 */
603static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
604{
605 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
606 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
607 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
608 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
609 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
610 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
611 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
612 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
613 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
614 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
615 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
616 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
617 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
618 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
619 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
620 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
621 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
622 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
623 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
624#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
625 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
626 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
627 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
628 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
629 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
630 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
631 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
632 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
633 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
634#else
635 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
636 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
637 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
638 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
639 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
640 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
641 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
642 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
643 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
644#endif
645 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
646 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
647 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
648 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
649 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
650 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
651 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
652 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
653 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
654 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
655 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
656 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
657 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
658 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
659 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
660 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
661 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
662 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
663 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
664 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
665 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
666 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
667#if defined(VBOX_WITH_NESTED_HWVIRT_VMX) && defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT)
668 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
669#else
670 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
671#endif
672 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
673 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
674#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
675 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
676#else
677 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
678#endif
679 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
680 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
681 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
682 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
683 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
684 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
685 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
686 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
687 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
688 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
689 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
690 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
691 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
692 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
693 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
694 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
695};
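
# if 0
/*
 * Sketch (kept out of the build) of how this table is consumed: the basic exit
 * reason, bounded by VMX_EXIT_MAX, indexes straight into g_aVMExitHandlers and
 * the handler runs with the transient structure.  The vmxHCExampleDispatch name
 * is purely illustrative; this is not the actual dispatcher.
 */
static VBOXSTRICTRC vmxHCExampleDispatch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
{
    Assert(uExitReason <= VMX_EXIT_MAX);
    return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
}
# endif
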
696#endif /* HMVMX_USE_FUNCTION_TABLE */
697
698#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
699static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
700{
701 /* 0 */ "(Not Used)",
702 /* 1 */ "VMCALL executed in VMX root operation.",
703 /* 2 */ "VMCLEAR with invalid physical address.",
704 /* 3 */ "VMCLEAR with VMXON pointer.",
705 /* 4 */ "VMLAUNCH with non-clear VMCS.",
706 /* 5 */ "VMRESUME with non-launched VMCS.",
707 /* 6 */ "VMRESUME after VMXOFF.",
708 /* 7 */ "VM-entry with invalid control fields.",
709 /* 8 */ "VM-entry with invalid host state fields.",
710 /* 9 */ "VMPTRLD with invalid physical address.",
711 /* 10 */ "VMPTRLD with VMXON pointer.",
712 /* 11 */ "VMPTRLD with incorrect revision identifier.",
713 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
714 /* 13 */ "VMWRITE to read-only VMCS component.",
715 /* 14 */ "(Not Used)",
716 /* 15 */ "VMXON executed in VMX root operation.",
717 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
718 /* 17 */ "VM-entry with non-launched executing VMCS.",
719 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
720 /* 19 */ "VMCALL with non-clear VMCS.",
721 /* 20 */ "VMCALL with invalid VM-exit control fields.",
722 /* 21 */ "(Not Used)",
723 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
724 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
725 /* 24 */ "VMCALL with invalid SMM-monitor features.",
726 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
727 /* 26 */ "VM-entry with events blocked by MOV SS.",
728 /* 27 */ "(Not Used)",
729 /* 28 */ "Invalid operand to INVEPT/INVVPID."
730};
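
# if 0
/*
 * Sketch (kept out of the build): translating a VM-instruction error read from
 * VMX_VMCS32_RO_VM_INSTR_ERROR into its diagnostic string, bounded by
 * HMVMX_INSTR_ERROR_MAX.  The vmxHCExampleInstrErrorToString name is purely
 * illustrative.
 */
static const char *vmxHCExampleInstrErrorToString(uint32_t uInstrError)
{
    return uInstrError <= HMVMX_INSTR_ERROR_MAX ? g_apszVmxInstrErrors[uInstrError] : "Unknown";
}
# endif
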
731#endif /* VBOX_STRICT && LOG_ENABLED */
732
733
734/**
735 * Gets the CR0 guest/host mask.
736 *
737 * These bits typically do not change through the lifetime of a VM. Any bit set in
738 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
739 * by the guest.
740 *
741 * @returns The CR0 guest/host mask.
742 * @param pVCpu The cross context virtual CPU structure.
743 */
744static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
745{
746 /*
747 * Modifications by the guest to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW)
748 * and to CR0 bits that we require for shadow paging (PG) must cause VM-exits.
749 *
750 * Furthermore, modifications to any bits that are reserved/unspecified currently
751 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
752 * when future CPUs specify and use currently reserved/unspecified bits.
753 */
754 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
755 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
756 * and @bugref{6944}. */
757 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
758 return ( X86_CR0_PE
759 | X86_CR0_NE
760 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
761 | X86_CR0_PG
762 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
763}
764
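#if 0
/*
 * Sketch (kept out of the build): committing the fixed CR0 mask to the
 * VMX_VMCS_CTRL_CR0_MASK field.  This assumes the natural-width
 * VMX_VMCS_WRITE_NW wrapper; the vmxHCExampleCommitCr0Mask name is purely
 * illustrative.
 */
static void vmxHCExampleCommitCr0Mask(PVMCPUCC pVCpu)
{
    uint64_t const fCr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
    int const rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);
    AssertRC(rc);
}
#endif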
765
766/**
767 * Gets the CR4 guest/host mask.
768 *
769 * These bits typically do not change through the lifetime of a VM. Any bit set in
770 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
771 * by the guest.
772 *
773 * @returns The CR4 guest/host mask.
774 * @param pVCpu The cross context virtual CPU structure.
775 */
776static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
777{
778 /*
779 * We construct a mask of all CR4 bits that the guest can modify without causing
780 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
781 * a VM-exit when the guest attempts to modify them when executing using
782 * hardware-assisted VMX.
783 *
784 * When a feature is not exposed to the guest (and may be present on the host),
785 * we want to intercept guest modifications to the bit so we can emulate proper
786 * behavior (e.g., #GP).
787 *
788 * Furthermore, only modifications to those bits that don't require immediate
789 * emulation are allowed. For example, PCIDE is excluded because its behavior
790 * depends on CR3, which might not always be the guest value while executing
791 * using hardware-assisted VMX.
792 */
793 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
794 bool const fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
795 bool const fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
796 bool const fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
797
798 /*
799 * Paranoia.
800 * Ensure features exposed to the guest are present on the host.
801 */
802 Assert(!fFsGsBase || pVM->cpum.ro.HostFeatures.fFsGsBase);
803 Assert(!fXSaveRstor || pVM->cpum.ro.HostFeatures.fXSaveRstor);
804 Assert(!fFxSaveRstor || pVM->cpum.ro.HostFeatures.fFxSaveRstor);
805
806 uint64_t const fGstMask = ( X86_CR4_PVI
807 | X86_CR4_TSD
808 | X86_CR4_DE
809 | X86_CR4_MCE
810 | X86_CR4_PCE
811 | X86_CR4_OSXMMEEXCPT
812 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
813 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
814 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0));
815 return ~fGstMask;
816}
817
818
819/**
820 * Adds one or more exceptions to the exception bitmap and commits it to the current
821 * VMCS.
822 *
823 * @param pVCpu The cross context virtual CPU structure.
824 * @param pVmxTransient The VMX-transient structure.
825 * @param uXcptMask The exception(s) to add.
826 */
827static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
828{
829 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
830 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
831 if ((uXcptBitmap & uXcptMask) != uXcptMask)
832 {
833 uXcptBitmap |= uXcptMask;
834 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
835 AssertRC(rc);
836 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
837 }
838}
839
840
841/**
842 * Adds an exception to the exception bitmap and commits it to the current VMCS.
843 *
844 * @param pVCpu The cross context virtual CPU structure.
845 * @param pVmxTransient The VMX-transient structure.
846 * @param uXcpt The exception to add.
847 */
848static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
849{
850 Assert(uXcpt <= X86_XCPT_LAST);
851 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
852}
853
854
855/**
856 * Removes one or more exceptions from the exception bitmap and commits it to the
857 * current VMCS.
858 *
859 * This takes care of not removing the exception intercept if a nested-guest
860 * requires the exception to be intercepted.
861 *
862 * @returns VBox status code.
863 * @param pVCpu The cross context virtual CPU structure.
864 * @param pVmxTransient The VMX-transient structure.
865 * @param uXcptMask The exception(s) to remove.
866 */
867static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
868{
869 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
870 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
871 if (u32XcptBitmap & uXcptMask)
872 {
873#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
874 if (!pVmxTransient->fIsNestedGuest)
875 { /* likely */ }
876 else
877 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
878#endif
879#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
880 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
881 | RT_BIT(X86_XCPT_DE)
882 | RT_BIT(X86_XCPT_NM)
883 | RT_BIT(X86_XCPT_TS)
884 | RT_BIT(X86_XCPT_UD)
885 | RT_BIT(X86_XCPT_NP)
886 | RT_BIT(X86_XCPT_SS)
887 | RT_BIT(X86_XCPT_GP)
888 | RT_BIT(X86_XCPT_PF)
889 | RT_BIT(X86_XCPT_MF));
890#elif defined(HMVMX_ALWAYS_TRAP_PF)
891 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
892#endif
893 if (uXcptMask)
894 {
895 /* Validate we are not removing any essential exception intercepts. */
896#ifndef IN_NEM_DARWIN
897 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
898#else
899 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
900#endif
901 NOREF(pVCpu);
902 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
903 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
904
905 /* Remove it from the exception bitmap. */
906 u32XcptBitmap &= ~uXcptMask;
907
908 /* Commit and update the cache if necessary. */
909 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
910 {
911 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
912 AssertRC(rc);
913 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
914 }
915 }
916 }
917 return VINF_SUCCESS;
918}
919
920
921/**
922 * Removes an exception from the exception bitmap and commits it to the current
923 * VMCS.
924 *
925 * @returns VBox status code.
926 * @param pVCpu The cross context virtual CPU structure.
927 * @param pVmxTransient The VMX-transient structure.
928 * @param uXcpt The exception to remove.
929 */
930static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
931{
932 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
933}
934
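#if 0
/*
 * Sketch (kept out of the build): toggling the #GP intercept with the helpers
 * above; removal silently keeps intercepts a nested-guest still requires.  The
 * vmxHCExampleToggleGpIntercept name is purely illustrative.
 */
static void vmxHCExampleToggleGpIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, bool fIntercept)
{
    if (fIntercept)
        vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
    else
    {
        int const rc = vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
        AssertRC(rc);
    }
}
#endif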
935
936#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
937/**
938 * Loads the shadow VMCS specified by the VMCS info. object.
939 *
940 * @returns VBox status code.
941 * @param pVmcsInfo The VMCS info. object.
942 *
943 * @remarks Can be called with interrupts disabled.
944 */
945static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
946{
947 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
948 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
949
950 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
951 if (RT_SUCCESS(rc))
952 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
953 return rc;
954}
955
956
957/**
958 * Clears the shadow VMCS specified by the VMCS info. object.
959 *
960 * @returns VBox status code.
961 * @param pVmcsInfo The VMCS info. object.
962 *
963 * @remarks Can be called with interrupts disabled.
964 */
965static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
966{
967 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
968 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
969
970 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
971 if (RT_SUCCESS(rc))
972 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
973 return rc;
974}
975
976
977/**
978 * Switches from and to the specified VMCSes.
979 *
980 * @returns VBox status code.
981 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
982 * @param pVmcsInfoTo The VMCS info. object we are switching to.
983 *
984 * @remarks Called with interrupts disabled.
985 */
986static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
987{
988 /*
989 * Clear the VMCS we are switching out if it has not already been cleared.
990 * This will sync any CPU internal data back to the VMCS.
991 */
992 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
993 {
994 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
995 if (RT_SUCCESS(rc))
996 {
997 /*
998 * The shadow VMCS, if any, would not be active at this point since we
999 * would have cleared it while importing the virtual hardware-virtualization
1000 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
1001 * clear the shadow VMCS here, just assert for safety.
1002 */
1003 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
1004 }
1005 else
1006 return rc;
1007 }
1008
1009 /*
1010 * Clear the VMCS we are switching to if it has not already been cleared.
1011 * This will initialize the VMCS launch state to "clear" required for loading it.
1012 *
1013 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1014 */
1015 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1016 {
1017 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1018 if (RT_SUCCESS(rc))
1019 { /* likely */ }
1020 else
1021 return rc;
1022 }
1023
1024 /*
1025 * Finally, load the VMCS we are switching to.
1026 */
1027 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1028}
1029
1030
1031/**
1032 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1033 * caller.
1034 *
1035 * @returns VBox status code.
1036 * @param pVCpu The cross context virtual CPU structure.
1037 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1038 * true) or guest VMCS (pass false).
1039 */
1040static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1041{
1042 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1043 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1044
1045 PVMXVMCSINFO pVmcsInfoFrom;
1046 PVMXVMCSINFO pVmcsInfoTo;
1047 if (fSwitchToNstGstVmcs)
1048 {
1049 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1050 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1051 }
1052 else
1053 {
1054 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1055 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1056 }
1057
1058 /*
1059 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1060 * preemption hook code path acquires the current VMCS.
1061 */
1062 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1063
1064 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1065 if (RT_SUCCESS(rc))
1066 {
1067 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1068 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1069
1070 /*
1071 * If we are switching to a VMCS that was executed on a different host CPU or was
1072 * never executed before, flag that we need to export the host state before executing
1073 * guest/nested-guest code using hardware-assisted VMX.
1074 *
1075 * This could probably be done in a preemptible context since the preemption hook
1076 * will flag the necessary change in host context. However, since preemption is
1077 * already disabled and to avoid making assumptions about host specific code in
1078 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1079 * disabled.
1080 */
1081 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1082 { /* likely */ }
1083 else
1084 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1085
1086 ASMSetFlags(fEFlags);
1087
1088 /*
1089 * We use different VM-exit MSR-store areas for the guest and the nested-guest. Hence,
1090 * flag that we need to update the host MSR values there. Even if we decide in the
1091 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1092 * if its content differs, we would have to update the host MSRs anyway.
1093 */
1094 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1095 }
1096 else
1097 ASMSetFlags(fEFlags);
1098 return rc;
1099}
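
# if 0
/*
 * Sketch (kept out of the build): a caller switching to the nested-guest VMCS
 * before running the nested-guest and back to the guest VMCS afterwards.  The
 * vmxHCExampleSwitchVmcsRoundTrip name is purely illustrative.
 */
static int vmxHCExampleSwitchVmcsRoundTrip(PVMCPUCC pVCpu)
{
    int rc = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, true /* fSwitchToNstGstVmcs */);
    if (RT_SUCCESS(rc))
    {
        /* ... run the nested-guest using hardware-assisted VMX ... */
        rc = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, false /* fSwitchToNstGstVmcs */);
    }
    return rc;
}
# endif
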
1100#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1101
1102
1103#ifdef VBOX_STRICT
1104/**
1105 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1106 * transient structure.
1107 *
1108 * @param pVCpu The cross context virtual CPU structure.
1109 * @param pVmxTransient The VMX-transient structure.
1110 */
1111DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1112{
1113 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1114 AssertRC(rc);
1115}
1116
1117
1118/**
1119 * Reads the VM-entry exception error code field from the VMCS into
1120 * the VMX transient structure.
1121 *
1122 * @param pVCpu The cross context virtual CPU structure.
1123 * @param pVmxTransient The VMX-transient structure.
1124 */
1125DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1126{
1127 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1128 AssertRC(rc);
1129}
1130
1131
1132/**
1133 * Reads the VM-entry instruction length field from the VMCS into
1134 * the VMX transient structure.
1135 *
1136 * @param pVCpu The cross context virtual CPU structure.
1137 * @param pVmxTransient The VMX-transient structure.
1138 */
1139DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1140{
1141 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1142 AssertRC(rc);
1143}
1144#endif /* VBOX_STRICT */
1145
1146
1147/**
1148 * Reads the VM-exit interruption-information field from the VMCS into the VMX
1149 * transient structure.
1150 *
1151 * @param pVCpu The cross context virtual CPU structure.
1152 * @param pVmxTransient The VMX-transient structure.
1153 */
1154DECLINLINE(void) vmxHCReadExitIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1155{
1156 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1157 {
1158 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1159 AssertRC(rc);
1160 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO;
1161 }
1162}
1163
1164
1165/**
1166 * Reads the VM-exit interruption error code from the VMCS into the VMX
1167 * transient structure.
1168 *
1169 * @param pVCpu The cross context virtual CPU structure.
1170 * @param pVmxTransient The VMX-transient structure.
1171 */
1172DECLINLINE(void) vmxHCReadExitIntErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1173{
1174 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1175 {
1176 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1177 AssertRC(rc);
1178 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE;
1179 }
1180}
1181
1182
1183/**
1184 * Reads the VM-exit instruction length field from the VMCS into the VMX
1185 * transient structure.
1186 *
1187 * @param pVCpu The cross context virtual CPU structure.
1188 * @param pVmxTransient The VMX-transient structure.
1189 */
1190DECLINLINE(void) vmxHCReadExitInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1191{
1192 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1193 {
1194 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1195 AssertRC(rc);
1196 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
1197 }
1198}
1199
1200
1201/**
1202 * Reads the VM-exit instruction-information field from the VMCS into
1203 * the VMX transient structure.
1204 *
1205 * @param pVCpu The cross context virtual CPU structure.
1206 * @param pVmxTransient The VMX-transient structure.
1207 */
1208DECLINLINE(void) vmxHCReadExitInstrInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1209{
1210 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1211 {
1212 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1213 AssertRC(rc);
1214 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO;
1215 }
1216}
1217
1218
1219/**
1220 * Reads the Exit Qualification from the VMCS into the VMX transient structure.
1221 *
1222 * @param pVCpu The cross context virtual CPU structure.
1223 * @param pVmxTransient The VMX-transient structure.
1224 */
1225DECLINLINE(void) vmxHCReadExitQualVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1226{
1227 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1228 {
1229 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1230 AssertRC(rc);
1231 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
1232 }
1233}
1234
1235
1236/**
1237 * Reads the Guest-linear address from the VMCS into the VMX transient structure.
1238 *
1239 * @param pVCpu The cross context virtual CPU structure.
1240 * @param pVmxTransient The VMX-transient structure.
1241 */
1242DECLINLINE(void) vmxHCReadGuestLinearAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1243{
1244 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1245 {
1246 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1247 AssertRC(rc);
1248 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_LINEAR_ADDR;
1249 }
1250}
1251
1252
1253/**
1254 * Reads the Guest-physical address from the VMCS into the VMX transient structure.
1255 *
1256 * @param pVCpu The cross context virtual CPU structure.
1257 * @param pVmxTransient The VMX-transient structure.
1258 */
1259DECLINLINE(void) vmxHCReadGuestPhysicalAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1260{
1261 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1262 {
1263 int rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1264 AssertRC(rc);
1265 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PHYSICAL_ADDR;
1266 }
1267}
1268
1269#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1270/**
1271 * Reads the Guest pending-debug exceptions from the VMCS into the VMX transient
1272 * structure.
1273 *
1274 * @param pVCpu The cross context virtual CPU structure.
1275 * @param pVmxTransient The VMX-transient structure.
1276 */
1277DECLINLINE(void) vmxHCReadGuestPendingDbgXctps(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1278{
1279 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1280 {
1281 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1282 AssertRC(rc);
1283 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PENDING_DBG_XCPTS;
1284 }
1285}
1286#endif
1287
1288/**
1289 * Reads the IDT-vectoring information field from the VMCS into the VMX
1290 * transient structure.
1291 *
1292 * @param pVCpu The cross context virtual CPU structure.
1293 * @param pVmxTransient The VMX-transient structure.
1294 *
1295 * @remarks No-long-jump zone!!!
1296 */
1297DECLINLINE(void) vmxHCReadIdtVectoringInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1298{
1299 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1300 {
1301 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1302 AssertRC(rc);
1303 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO;
1304 }
1305}
1306
1307
1308/**
1309 * Reads the IDT-vectoring error code from the VMCS into the VMX
1310 * transient structure.
1311 *
1312 * @param pVCpu The cross context virtual CPU structure.
1313 * @param pVmxTransient The VMX-transient structure.
1314 */
1315DECLINLINE(void) vmxHCReadIdtVectoringErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1316{
1317 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1318 {
1319 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1320 AssertRC(rc);
1321 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE;
1322 }
1323}
1324
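#if 0
/*
 * Sketch (kept out of the build): the lazy-read pattern implemented by the
 * helpers above.  A VM-exit handler requests only the read-only fields it needs,
 * each helper marks the corresponding HMVMX_READ_XXX bit in fVmcsFieldsRead so
 * repeated requests become no-ops, and HMVMX_ASSERT_READ (strict builds) checks
 * a field was fetched before it is consumed.  The vmxHCExampleConsumeExitQual
 * name is purely illustrative.
 */
static void vmxHCExampleConsumeExitQual(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN);
    Log4Func(("uExitQual=%#RX64 cbInstr=%u\n", pVmxTransient->uExitQual, pVmxTransient->cbExitInstr));
}
#endif
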
1325#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1326/**
1327 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1328 *
1329 * @param pVCpu The cross context virtual CPU structure.
1330 * @param pVmxTransient The VMX-transient structure.
1331 */
1332static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1333{
1334 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1335 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1336 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1337 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1338 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1339 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1340 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1341 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1342 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1343 AssertRC(rc);
1344 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1345 | HMVMX_READ_EXIT_INSTR_LEN
1346 | HMVMX_READ_EXIT_INSTR_INFO
1347 | HMVMX_READ_IDT_VECTORING_INFO
1348 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1349 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1350 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1351 | HMVMX_READ_GUEST_LINEAR_ADDR
1352 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1353}
1354#endif
1355
1356/**
1357 * Verifies that our cached values of the VMCS fields are all consistent with
1358 * what's actually present in the VMCS.
1359 *
1360 * @returns VBox status code.
1361 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1362 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1363 * VMCS content. HMCPU error-field is
1364 * updated, see VMX_VCI_XXX.
1365 * @param pVCpu The cross context virtual CPU structure.
1366 * @param pVmcsInfo The VMCS info. object.
1367 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1368 */
1369static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1370{
1371 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1372
1373 uint32_t u32Val;
1374 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1375 AssertRC(rc);
1376 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1377 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1378 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1379 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1380
1381 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1382 AssertRC(rc);
1383 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1384 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1385 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1386 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1387
1388 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1389 AssertRC(rc);
1390 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1391 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1392 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1393 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1394
1395 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1396 AssertRC(rc);
1397 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1398 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1399 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1400 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1401
1402 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1403 {
1404 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1405 AssertRC(rc);
1406 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1407 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1408 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1409 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1410 }
1411
1412 uint64_t u64Val;
1413 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1414 {
1415 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1416 AssertRC(rc);
1417 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1418 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1419 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1420 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1421 }
1422
1423 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1424 AssertRC(rc);
1425 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1426 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1427 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1428 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1429
1430 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1431 AssertRC(rc);
1432 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1433 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1434 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1435 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1436
1437 NOREF(pcszVmcs);
1438 return VINF_SUCCESS;
1439}
1440
1441
1442/**
1443 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1444 * VMCS.
1445 *
1446 * This is typically required when the guest changes paging mode.
1447 *
1448 * @returns VBox status code.
1449 * @param pVCpu The cross context virtual CPU structure.
1450 * @param pVmxTransient The VMX-transient structure.
1451 *
1452 * @remarks Requires EFER.
1453 * @remarks No-long-jump zone!!!
1454 */
1455static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1456{
1457 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1458 {
1459 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1460 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1461
1462 /*
1463 * VM-entry controls.
1464 */
1465 {
1466 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1467 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1468
1469 /*
1470 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1471 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1472 *
1473 * For nested-guests, this is a mandatory VM-entry control. It's also
1474 * required because we do not want to leak host bits to the nested-guest.
1475 */
1476 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1477
1478 /*
1479 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1480 *
1481              * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1482              * required to get the nested-guest working with hardware-assisted VMX execution.
1483 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1484 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1485 * here rather than while merging the guest VMCS controls.
1486 */
1487 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1488 {
1489 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1490 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1491 }
1492 else
1493 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1494
1495 /*
1496              * If the CPU supports the newer VMCS controls for managing guest/host EFER, use them.
1497              *
1498              * For nested-guests, we use the "load IA32_EFER" control if the hardware supports it,
1499 * regardless of whether the nested-guest VMCS specifies it because we are free to
1500 * load whatever MSRs we require and we do not need to modify the guest visible copy
1501 * of the VM-entry MSR load area.
1502 */
1503 if ( g_fHmVmxSupportsVmcsEfer
1504#ifndef IN_NEM_DARWIN
1505 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1506#endif
1507 )
1508 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1509 else
1510 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1511
1512 /*
1513 * The following should -not- be set (since we're not in SMM mode):
1514 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1515 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1516 */
1517
1518 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1519 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1520
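            /*
             * Note on the check below: fVal starts as allowed0 (bits that must be 1) and only
             * gains bits we want set, while fZap is allowed1 (bits that may be 1). Hence
             * (fVal & fZap) == fVal holds exactly when every bit we have set is permitted by
             * the CPU. For example, if VMX_ENTRY_CTLS_LOAD_EFER_MSR were ORed in on a CPU
             * whose allowed1 mask has that bit clear, the masked value would differ and we
             * would fail below with VMX_UFC_CTRL_ENTRY.
             */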
1521 if ((fVal & fZap) == fVal)
1522 { /* likely */ }
1523 else
1524 {
1525 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1526 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1527 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1528 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1529 }
1530
1531 /* Commit it to the VMCS. */
1532 if (pVmcsInfo->u32EntryCtls != fVal)
1533 {
1534 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1535 AssertRC(rc);
1536 pVmcsInfo->u32EntryCtls = fVal;
1537 }
1538 }
1539
1540 /*
1541 * VM-exit controls.
1542 */
1543 {
1544 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1545 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1546
1547 /*
1548 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1549 * supported the 1-setting of this bit.
1550 *
1551 * For nested-guests, we set the "save debug controls" as the converse
1552 * "load debug controls" is mandatory for nested-guests anyway.
1553 */
1554 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1555
1556 /*
1557 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1558              * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1559              * host EFER.LMA and EFER.LME bits to this value. See assertion in
1560 * vmxHCExportHostMsrs().
1561 *
1562 * For nested-guests, we always set this bit as we do not support 32-bit
1563 * hosts.
1564 */
1565 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1566
1567#ifndef IN_NEM_DARWIN
1568 /*
1569 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1570 *
1571 * For nested-guests, we should use the "save IA32_EFER" control if we also
1572 * used the "load IA32_EFER" control while exporting VM-entry controls.
1573 */
1574 if ( g_fHmVmxSupportsVmcsEfer
1575 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1576 {
1577 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1578 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1579 }
1580#endif
1581
1582 /*
1583 * Enable saving of the VMX-preemption timer value on VM-exit.
1584 * For nested-guests, currently not exposed/used.
1585 */
1586 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1587 * the timer value. */
1588 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1589 {
1590 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1591 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1592 }
1593
1594 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1595 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1596
1597 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1598 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1599 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1600
1601 if ((fVal & fZap) == fVal)
1602 { /* likely */ }
1603 else
1604 {
1605 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1606 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1607 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1608 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1609 }
1610
1611 /* Commit it to the VMCS. */
1612 if (pVmcsInfo->u32ExitCtls != fVal)
1613 {
1614 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1615 AssertRC(rc);
1616 pVmcsInfo->u32ExitCtls = fVal;
1617 }
1618 }
1619
1620 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1621 }
1622 return VINF_SUCCESS;
1623}
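/*
 * Rough usage sketch (an assumed caller pattern for illustration, not a call site taken
 * from this file): code that changes something affecting these controls marks them dirty
 * and re-exports them before the next VM-entry, along the lines of:
 *
 *     ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
 *     int rc = vmxHCExportGuestEntryExitCtls(pVCpu, pVmxTransient);
 *     AssertRCReturn(rc, rc);
 *
 * The function is a no-op when the HM_CHANGED_VMX_ENTRY_EXIT_CTLS bit is not set.
 */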
1624
1625
1626/**
1627 * Sets the TPR threshold in the VMCS.
1628 *
1629 * @param pVCpu The cross context virtual CPU structure.
1630 * @param pVmcsInfo The VMCS info. object.
1631 * @param u32TprThreshold The TPR threshold (task-priority class only).
1632 */
1633DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1634{
1635 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1636 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1637 RT_NOREF(pVmcsInfo);
1638 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1639 AssertRC(rc);
1640}
1641
1642
1643/**
1644 * Exports the guest APIC TPR state into the VMCS.
1645 *
1646 * @param pVCpu The cross context virtual CPU structure.
1647 * @param pVmxTransient The VMX-transient structure.
1648 *
1649 * @remarks No-long-jump zone!!!
1650 */
1651static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1652{
1653 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1654 {
1655 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1656
1657 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1658 if (!pVmxTransient->fIsNestedGuest)
1659 {
1660 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1661 && APICIsEnabled(pVCpu))
1662 {
1663 /*
1664 * Setup TPR shadowing.
1665 */
1666 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1667 {
1668 bool fPendingIntr = false;
1669 uint8_t u8Tpr = 0;
1670 uint8_t u8PendingIntr = 0;
1671 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1672 AssertRC(rc);
1673
1674 /*
1675 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1676 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1677 * priority of the pending interrupt so we can deliver the interrupt. If there
1678 * are no interrupts pending, set threshold to 0 to not cause any
1679 * TPR-below-threshold VM-exits.
1680 */
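                    /*
                     * Worked example (illustrative values): a pending vector of 0x51 has
                     * priority class 5 and a guest TPR of 0x68 has class 6; since 5 <= 6 the
                     * threshold becomes 5, so a guest TPR write below 0x50 causes the VM-exit
                     * that lets us deliver the interrupt.
                     */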
1681 uint32_t u32TprThreshold = 0;
1682 if (fPendingIntr)
1683 {
1684 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1685 (which is the Task-Priority Class). */
1686 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1687 const uint8_t u8TprPriority = u8Tpr >> 4;
1688 if (u8PendingPriority <= u8TprPriority)
1689 u32TprThreshold = u8PendingPriority;
1690 }
1691
1692 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1693 }
1694 }
1695 }
1696 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1697 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1698 }
1699}
1700
1701
1702/**
1703 * Gets the guest interruptibility-state and updates related force-flags.
1704 *
1705 * @returns Guest's interruptibility-state.
1706 * @param pVCpu The cross context virtual CPU structure.
1707 *
1708 * @remarks No-long-jump zone!!!
1709 */
1710static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1711{
1712 /*
1713 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1714 */
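    /*
     * For example, immediately after an STI that set EFLAGS.IF, RIP still matches the
     * recorded inhibit PC and IF is set, so block-by-STI is reported below; with IF
     * clear (typically after MOV SS / POP SS) block-by-MOV-SS is reported instead.
     */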
1715 uint32_t fIntrState = 0;
1716 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1717 {
1718 /* If inhibition is active, RIP and RFLAGS should've been imported from the VMCS already. */
1719 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
1720
1721 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1722 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
1723 {
1724 if (pCtx->eflags.Bits.u1IF)
1725 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1726 else
1727 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1728 }
1729 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1730 {
1731 /*
1732              * We can clear the inhibit force flag here: RIP no longer matches the recorded
1733              * inhibit PC, so even if we go back to the recompiler without executing guest
1734              * code in VT-x, the condition for clearing the flag is met and the cleared state is correct.
1735 */
1736 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1737 }
1738 }
1739
1740 /*
1741 * Check if we should inhibit NMI delivery.
1742 */
1743 if (CPUMIsGuestNmiBlocking(pVCpu))
1744 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1745
1746 /*
1747 * Validate.
1748 */
1749#ifdef VBOX_STRICT
1750     /* We don't support block-by-SMI yet. */
1751 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1752
1753 /* Block-by-STI must not be set when interrupts are disabled. */
1754 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
1755 {
1756 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1757 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
1758 }
1759#endif
1760
1761 return fIntrState;
1762}
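/*
 * Minimal usage sketch (assumed caller pattern, simplified): the returned value is
 * normally committed to the guest interruptibility-state field before VM-entry, e.g.
 *
 *     uint32_t const fIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
 *     int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
 *     AssertRC(rc);
 */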
1763
1764
1765/**
1766 * Exports the exception intercepts required for guest execution in the VMCS.
1767 *
1768 * @param pVCpu The cross context virtual CPU structure.
1769 * @param pVmxTransient The VMX-transient structure.
1770 *
1771 * @remarks No-long-jump zone!!!
1772 */
1773static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1774{
1775 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1776 {
1777 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1778 if ( !pVmxTransient->fIsNestedGuest
1779 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1780 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1781 else
1782 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1783
1784 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1785 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1786 }
1787}
1788
1789
1790/**
1791 * Exports the guest's RIP into the guest-state area in the VMCS.
1792 *
1793 * @param pVCpu The cross context virtual CPU structure.
1794 *
1795 * @remarks No-long-jump zone!!!
1796 */
1797static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1798{
1799 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1800 {
1801 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1802
1803 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1804 AssertRC(rc);
1805
1806 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1807 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1808 }
1809}
1810
1811
1812/**
1813 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1814 *
1815 * @param pVCpu The cross context virtual CPU structure.
1816 * @param pVmxTransient The VMX-transient structure.
1817 *
1818 * @remarks No-long-jump zone!!!
1819 */
1820static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1821{
1822 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1823 {
1824 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1825
1826 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
1827 Let us assert it as such and use 32-bit VMWRITE. */
1828 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
1829 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
1830 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
1831 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
1832
1833#ifndef IN_NEM_DARWIN
1834 /*
1835 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1836 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1837 * can run the real-mode guest code under Virtual 8086 mode.
1838 */
1839 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1840 if (pVmcsInfo->RealMode.fRealOnV86Active)
1841 {
1842 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1843 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1844 Assert(!pVmxTransient->fIsNestedGuest);
1845 pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
1846 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
1847 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1848 }
1849#else
1850 RT_NOREF(pVmxTransient);
1851#endif
1852
1853 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
1854 AssertRC(rc);
1855
1856 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1857 Log4Func(("eflags=%#RX32\n", fEFlags.u32));
1858 }
1859}
1860
1861
1862#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1863/**
1864 * Copies the nested-guest VMCS to the shadow VMCS.
1865 *
1866 * @returns VBox status code.
1867 * @param pVCpu The cross context virtual CPU structure.
1868 * @param pVmcsInfo The VMCS info. object.
1869 *
1870 * @remarks No-long-jump zone!!!
1871 */
1872static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1873{
1874 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1875 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1876
1877 /*
1878 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1879 * current VMCS, as we may try saving guest lazy MSRs.
1880 *
1881 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1882 * calling the import VMCS code which is currently performing the guest MSR reads
1883 * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
1884 * and the rest of the VMX leave session machinery.
1885 */
1886 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1887
1888 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1889 if (RT_SUCCESS(rc))
1890 {
1891 /*
1892 * Copy all guest read/write VMCS fields.
1893 *
1894 * We don't check for VMWRITE failures here for performance reasons and
1895 * because they are not expected to fail, barring irrecoverable conditions
1896 * like hardware errors.
1897 */
1898 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1899 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1900 {
1901 uint64_t u64Val;
1902 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1903 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1904 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1905 }
1906
1907 /*
1908 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1909 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1910 */
1911 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1912 {
1913 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1914 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1915 {
1916 uint64_t u64Val;
1917 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1918 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1919 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1920 }
1921 }
1922
1923 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1924 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1925 }
1926
1927 ASMSetFlags(fEFlags);
1928 return rc;
1929}
1930
1931
1932/**
1933 * Copies the shadow VMCS to the nested-guest VMCS.
1934 *
1935 * @returns VBox status code.
1936 * @param pVCpu The cross context virtual CPU structure.
1937 * @param pVmcsInfo The VMCS info. object.
1938 *
1939 * @remarks Called with interrupts disabled.
1940 */
1941static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1942{
1943 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1944 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1945 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1946
1947 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1948 if (RT_SUCCESS(rc))
1949 {
1950 /*
1951 * Copy guest read/write fields from the shadow VMCS.
1952 * Guest read-only fields cannot be modified, so no need to copy them.
1953 *
1954 * We don't check for VMREAD failures here for performance reasons and
1955 * because they are not expected to fail, barring irrecoverable conditions
1956 * like hardware errors.
1957 */
1958 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1959 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1960 {
1961 uint64_t u64Val;
1962 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1963 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1964 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1965 }
1966
1967 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1968 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1969 }
1970 return rc;
1971}
1972
1973
1974/**
1975 * Enables VMCS shadowing for the given VMCS info. object.
1976 *
1977 * @param pVCpu The cross context virtual CPU structure.
1978 * @param pVmcsInfo The VMCS info. object.
1979 *
1980 * @remarks No-long-jump zone!!!
1981 */
1982static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1983{
1984 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1985 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1986 {
1987 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1988 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1989 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1990 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1991 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1992 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
1993 Log4Func(("Enabled\n"));
1994 }
1995}
1996
1997
1998/**
1999 * Disables VMCS shadowing for the given VMCS info. object.
2000 *
2001 * @param pVCpu The cross context virtual CPU structure.
2002 * @param pVmcsInfo The VMCS info. object.
2003 *
2004 * @remarks No-long-jump zone!!!
2005 */
2006static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2007{
2008 /*
2009 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
2010 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
2011 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
2012 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
2013 *
2014 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2015 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2016 */
2017 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2018 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2019 {
2020 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2021 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2022 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2023 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2024 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2025 Log4Func(("Disabled\n"));
2026 }
2027}
2028#endif
2029
2030
2031/**
2032 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2033 *
2034  * The guest FPU state is always pre-loaded, hence we don't need to bother about
2035  * sharing FPU-related CR0 bits between the guest and host.
2036 *
2037 * @returns VBox status code.
2038 * @param pVCpu The cross context virtual CPU structure.
2039 * @param pVmxTransient The VMX-transient structure.
2040 *
2041 * @remarks No-long-jump zone!!!
2042 */
2043static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2044{
2045 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2046 {
2047 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2048 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2049
2050 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2051 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2052 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2053 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2054 else
2055 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
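        /*
         * For example, on typical hardware MSR_IA32_VMX_CR0_FIXED0 is 0x80000021 (PE, NE
         * and PG must be 1) and MSR_IA32_VMX_CR0_FIXED1 is 0xffffffff; with unrestricted
         * guest execution PE and PG are dropped from the must-be-set mask above so real-mode
         * and non-paged guests can run, leaving only NE mandatory. (Illustrative values,
         * the actual masks come from g_HmMsrs.)
         */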
2056
2057 if (!pVmxTransient->fIsNestedGuest)
2058 {
2059 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2060 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2061 uint64_t const u64ShadowCr0 = u64GuestCr0;
2062 Assert(!RT_HI_U32(u64GuestCr0));
2063
2064 /*
2065 * Setup VT-x's view of the guest CR0.
2066 */
2067 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2068 if (VM_IS_VMX_NESTED_PAGING(pVM))
2069 {
2070#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2071 if (CPUMIsGuestPagingEnabled(pVCpu))
2072 {
2073 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2074 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2075 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2076 }
2077 else
2078 {
2079 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2080 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2081 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2082 }
2083
2084 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2085 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2086 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2087#endif
2088 }
2089 else
2090 {
2091 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2092 u64GuestCr0 |= X86_CR0_WP;
2093 }
2094
2095 /*
2096 * Guest FPU bits.
2097 *
2098              * Since we always pre-load the guest FPU before VM-entry, there is no need to track lazy state
2099              * using CR0.TS.
2100              *
2101              * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2102              * set on the first CPUs to support VT-x; there is no mention of it with regards to UX in the VM-entry checks.
2103 */
2104 u64GuestCr0 |= X86_CR0_NE;
2105
2106 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2107 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2108
2109 /*
2110 * Update exception intercepts.
2111 */
2112 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2113#ifndef IN_NEM_DARWIN
2114 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2115 {
2116 Assert(PDMVmmDevHeapIsEnabled(pVM));
2117 Assert(pVM->hm.s.vmx.pRealModeTSS);
2118 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2119 }
2120 else
2121#endif
2122 {
2123 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2124 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2125 if (fInterceptMF)
2126 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2127 }
2128
2129 /* Additional intercepts for debugging, define these yourself explicitly. */
2130#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2131 uXcptBitmap |= 0
2132 | RT_BIT(X86_XCPT_BP)
2133 | RT_BIT(X86_XCPT_DE)
2134 | RT_BIT(X86_XCPT_NM)
2135 | RT_BIT(X86_XCPT_TS)
2136 | RT_BIT(X86_XCPT_UD)
2137 | RT_BIT(X86_XCPT_NP)
2138 | RT_BIT(X86_XCPT_SS)
2139 | RT_BIT(X86_XCPT_GP)
2140 | RT_BIT(X86_XCPT_PF)
2141 | RT_BIT(X86_XCPT_MF)
2142 ;
2143#elif defined(HMVMX_ALWAYS_TRAP_PF)
2144 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2145#endif
2146 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2147 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2148 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2149
2150 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2151 u64GuestCr0 |= fSetCr0;
2152 u64GuestCr0 &= fZapCr0;
2153 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2154
2155 /* Commit the CR0 and related fields to the guest VMCS. */
2156 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2157 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2158 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2159 {
2160 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2161 AssertRC(rc);
2162 }
2163 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2164 {
2165 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2166 AssertRC(rc);
2167 }
2168
2169 /* Update our caches. */
2170 pVmcsInfo->u32ProcCtls = uProcCtls;
2171 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2172
2173 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2174 }
2175 else
2176 {
2177 /*
2178 * With nested-guests, we may have extended the guest/host mask here since we
2179 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2180 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2181 * originally supplied. We must copy those bits from the nested-guest CR0 into
2182 * the nested-guest CR0 read-shadow.
2183 */
2184 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2185 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2186 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2187 Assert(!RT_HI_U32(u64GuestCr0));
2188 Assert(u64GuestCr0 & X86_CR0_NE);
2189
2190 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2191 u64GuestCr0 |= fSetCr0;
2192 u64GuestCr0 &= fZapCr0;
2193 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2194
2195 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2196 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2197 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2198
2199 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2200 }
2201
2202 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2203 }
2204
2205 return VINF_SUCCESS;
2206}
2207
2208
2209/**
2210 * Exports the guest control registers (CR3, CR4) into the guest-state area
2211 * in the VMCS.
2212 *
2213 * @returns VBox strict status code.
2214 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2215 * without unrestricted guest access and the VMMDev is not presently
2216 * mapped (e.g. EFI32).
2217 *
2218 * @param pVCpu The cross context virtual CPU structure.
2219 * @param pVmxTransient The VMX-transient structure.
2220 *
2221 * @remarks No-long-jump zone!!!
2222 */
2223static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2224{
2225 int rc = VINF_SUCCESS;
2226 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2227
2228 /*
2229 * Guest CR2.
2230 * It's always loaded in the assembler code. Nothing to do here.
2231 */
2232
2233 /*
2234 * Guest CR3.
2235 */
2236 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2237 {
2238 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2239
2240 if (VM_IS_VMX_NESTED_PAGING(pVM))
2241 {
2242#ifndef IN_NEM_DARWIN
2243 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2244 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2245
2246 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2247 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2248 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2249 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2250
2251 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2252 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2253 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
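            /*
             * For example, with the EPT PML4 table at host-physical address 0x12345000 the
             * resulting EPTP is 0x1234501e: memory type WB (6) in bits 2:0 and a page-walk
             * length of 4 (encoded as 3) in bits 5:3, matching the checks asserted below.
             */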
2254
2255 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2256 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2257 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2258 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2259 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2260 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2261 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2262
2263 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2264 AssertRC(rc);
2265#endif
2266
2267 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2268 uint64_t u64GuestCr3 = pCtx->cr3;
2269 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2270 || CPUMIsGuestPagingEnabledEx(pCtx))
2271 {
2272 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2273 if (CPUMIsGuestInPAEModeEx(pCtx))
2274 {
2275 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2276 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2277 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2278 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2279 }
2280
2281 /*
2282                  * With nested paging, the guest's view of its CR3 is left untouched when the
2283                  * guest is using paging, or when we have unrestricted guest execution to handle
2284                  * the guest while it's not using paging.
2285 */
2286 }
2287#ifndef IN_NEM_DARWIN
2288 else
2289 {
2290 /*
2291 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2292 * thinks it accesses physical memory directly, we use our identity-mapped
2293 * page table to map guest-linear to guest-physical addresses. EPT takes care
2294 * of translating it to host-physical addresses.
2295 */
2296 RTGCPHYS GCPhys;
2297 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2298
2299 /* We obtain it here every time as the guest could have relocated this PCI region. */
2300 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2301 if (RT_SUCCESS(rc))
2302 { /* likely */ }
2303 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2304 {
2305 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2306 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2307 }
2308 else
2309 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2310
2311 u64GuestCr3 = GCPhys;
2312 }
2313#endif
2314
2315 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2316 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2317 AssertRC(rc);
2318 }
2319 else
2320 {
2321 Assert(!pVmxTransient->fIsNestedGuest);
2322 /* Non-nested paging case, just use the hypervisor's CR3. */
2323 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2324
2325 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2326 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2327 AssertRC(rc);
2328 }
2329
2330 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2331 }
2332
2333 /*
2334 * Guest CR4.
2335      * ASSUMES this is done every time we get in from ring-3! (XCR0)
2336 */
2337 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2338 {
2339 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2340 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2341
2342 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2343 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2344
2345 /*
2346 * With nested-guests, we may have extended the guest/host mask here (since we
2347 * merged in the outer guest's mask, see vmxHCMergeVmcsNested). This means, the
2348 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2349 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2350 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2351 */
2352 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2353 uint64_t u64GuestCr4 = pCtx->cr4;
2354 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2355 ? pCtx->cr4
2356 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2357 Assert(!RT_HI_U32(u64GuestCr4));
2358
2359#ifndef IN_NEM_DARWIN
2360 /*
2361 * Setup VT-x's view of the guest CR4.
2362 *
2363 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2364 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2365 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2366 *
2367 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2368 */
2369 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2370 {
2371 Assert(pVM->hm.s.vmx.pRealModeTSS);
2372 Assert(PDMVmmDevHeapIsEnabled(pVM));
2373 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2374 }
2375#endif
2376
2377 if (VM_IS_VMX_NESTED_PAGING(pVM))
2378 {
2379 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2380 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2381 {
2382 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2383 u64GuestCr4 |= X86_CR4_PSE;
2384 /* Our identity mapping is a 32-bit page directory. */
2385 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2386 }
2387 /* else use guest CR4.*/
2388 }
2389 else
2390 {
2391 Assert(!pVmxTransient->fIsNestedGuest);
2392
2393 /*
2394              * The shadow paging mode can differ from the guest paging mode: the shadow follows the host
2395              * paging mode, so we need to adjust VT-x's view of CR4 according to our shadow page tables.
2396 */
2397 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2398 {
2399 case PGMMODE_REAL: /* Real-mode. */
2400 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2401 case PGMMODE_32_BIT: /* 32-bit paging. */
2402 {
2403 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2404 break;
2405 }
2406
2407 case PGMMODE_PAE: /* PAE paging. */
2408 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2409 {
2410 u64GuestCr4 |= X86_CR4_PAE;
2411 break;
2412 }
2413
2414 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2415 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2416 {
2417#ifdef VBOX_WITH_64_BITS_GUESTS
2418 /* For our assumption in vmxHCShouldSwapEferMsr. */
2419 Assert(u64GuestCr4 & X86_CR4_PAE);
2420 break;
2421#endif
2422 }
2423 default:
2424 AssertFailed();
2425 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2426 }
2427 }
2428
2429 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2430 u64GuestCr4 |= fSetCr4;
2431 u64GuestCr4 &= fZapCr4;
2432
2433 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2434 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2435 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2436
2437#ifndef IN_NEM_DARWIN
2438 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2439 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
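        /* For example (illustrative values): host XCR0 = 0x7 and guest XCR0 = 0x3 with
           CR4.OSXSAVE set means the guest value must be swapped in around VM-entry, while
           identical values, or a guest that never set CR4.OSXSAVE, need no switching. */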
2440 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2441 {
2442 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2443 hmR0VmxUpdateStartVmFunction(pVCpu);
2444 }
2445#endif
2446
2447 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2448
2449 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2450 }
2451 return rc;
2452}
2453
2454
2455#ifdef VBOX_STRICT
2456/**
2457 * Strict function to validate segment registers.
2458 *
2459 * @param pVCpu The cross context virtual CPU structure.
2460 * @param pVmcsInfo The VMCS info. object.
2461 *
2462 * @remarks Will import guest CR0 on strict builds during validation of
2463 * segments.
2464 */
2465static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2466{
2467 /*
2468 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2469 *
2470      * The reason we check for attribute value 0 in this function and not just the unusable bit is
2471      * that vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2472 * unusable bit and doesn't change the guest-context value.
2473 */
2474 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2475 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2476 vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2477 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2478 && ( !CPUMIsGuestInRealModeEx(pCtx)
2479 && !CPUMIsGuestInV86ModeEx(pCtx)))
2480 {
2481 /* Protected mode checks */
2482 /* CS */
2483 Assert(pCtx->cs.Attr.n.u1Present);
2484 Assert(!(pCtx->cs.Attr.u & 0xf00));
2485 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2486 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2487 || !(pCtx->cs.Attr.n.u1Granularity));
2488 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2489 || (pCtx->cs.Attr.n.u1Granularity));
2490 /* CS cannot be loaded with NULL in protected mode. */
2491 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2492 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2493 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2494 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2495 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2496 else
2497             AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2498 /* SS */
2499 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2500 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2501 if ( !(pCtx->cr0 & X86_CR0_PE)
2502 || pCtx->cs.Attr.n.u4Type == 3)
2503 {
2504 Assert(!pCtx->ss.Attr.n.u2Dpl);
2505 }
2506 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2507 {
2508 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2509 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2510 Assert(pCtx->ss.Attr.n.u1Present);
2511 Assert(!(pCtx->ss.Attr.u & 0xf00));
2512 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2513 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2514 || !(pCtx->ss.Attr.n.u1Granularity));
2515 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2516 || (pCtx->ss.Attr.n.u1Granularity));
2517 }
2518 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2519 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2520 {
2521 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2522 Assert(pCtx->ds.Attr.n.u1Present);
2523 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2524 Assert(!(pCtx->ds.Attr.u & 0xf00));
2525 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2526 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2527 || !(pCtx->ds.Attr.n.u1Granularity));
2528 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2529 || (pCtx->ds.Attr.n.u1Granularity));
2530 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2531 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2532 }
2533 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2534 {
2535 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2536 Assert(pCtx->es.Attr.n.u1Present);
2537 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2538 Assert(!(pCtx->es.Attr.u & 0xf00));
2539 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2540 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2541 || !(pCtx->es.Attr.n.u1Granularity));
2542 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2543 || (pCtx->es.Attr.n.u1Granularity));
2544 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2545 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2546 }
2547 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2548 {
2549 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2550 Assert(pCtx->fs.Attr.n.u1Present);
2551 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2552 Assert(!(pCtx->fs.Attr.u & 0xf00));
2553 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2554 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2555 || !(pCtx->fs.Attr.n.u1Granularity));
2556 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2557 || (pCtx->fs.Attr.n.u1Granularity));
2558 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2559 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2560 }
2561 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2562 {
2563 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2564 Assert(pCtx->gs.Attr.n.u1Present);
2565 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2566 Assert(!(pCtx->gs.Attr.u & 0xf00));
2567 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2568 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2569 || !(pCtx->gs.Attr.n.u1Granularity));
2570 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2571 || (pCtx->gs.Attr.n.u1Granularity));
2572 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2573 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2574 }
2575 /* 64-bit capable CPUs. */
2576 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2577 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2578 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2579 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2580 }
2581 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2582 || ( CPUMIsGuestInRealModeEx(pCtx)
2583 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2584 {
2585 /* Real and v86 mode checks. */
2586         /* vmxHCExportGuestSegReg() writes the modified attributes into the VMCS. We want what we're feeding to VT-x. */
2587 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2588#ifndef IN_NEM_DARWIN
2589 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2590 {
2591 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2592 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2593 }
2594 else
2595#endif
2596 {
2597 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2598 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2599 }
2600
2601 /* CS */
2602 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2603 Assert(pCtx->cs.u32Limit == 0xffff);
2604 Assert(u32CSAttr == 0xf3);
2605 /* SS */
2606 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2607 Assert(pCtx->ss.u32Limit == 0xffff);
2608 Assert(u32SSAttr == 0xf3);
2609 /* DS */
2610 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2611 Assert(pCtx->ds.u32Limit == 0xffff);
2612 Assert(u32DSAttr == 0xf3);
2613 /* ES */
2614 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2615 Assert(pCtx->es.u32Limit == 0xffff);
2616 Assert(u32ESAttr == 0xf3);
2617 /* FS */
2618 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2619 Assert(pCtx->fs.u32Limit == 0xffff);
2620 Assert(u32FSAttr == 0xf3);
2621 /* GS */
2622 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2623 Assert(pCtx->gs.u32Limit == 0xffff);
2624 Assert(u32GSAttr == 0xf3);
2625 /* 64-bit capable CPUs. */
2626 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2627 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2628 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2629 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2630 }
2631}
2632#endif /* VBOX_STRICT */
2633
2634
2635/**
2636 * Exports a guest segment register into the guest-state area in the VMCS.
2637 *
2638 * @returns VBox status code.
2639 * @param pVCpu The cross context virtual CPU structure.
2640 * @param pVmcsInfo The VMCS info. object.
2641 * @param iSegReg The segment register number (X86_SREG_XXX).
2642 * @param pSelReg Pointer to the segment selector.
2643 *
2644 * @remarks No-long-jump zone!!!
2645 */
2646static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2647{
2648 Assert(iSegReg < X86_SREG_COUNT);
2649
2650 uint32_t u32Access = pSelReg->Attr.u;
2651#ifndef IN_NEM_DARWIN
2652 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2653#endif
2654 {
2655 /*
2656          * The way to differentiate whether this is really a null selector or just a selector
2657          * that was loaded with 0 in real-mode is by using the segment attributes. A selector
2658          * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
2659          * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure that
2660          * NULL selectors loaded in protected-mode have their attributes set to 0.
2661 */
2662 if (u32Access)
2663 { }
2664 else
2665 u32Access = X86DESCATTR_UNUSABLE;
2666 }
2667#ifndef IN_NEM_DARWIN
2668 else
2669 {
2670 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2671 u32Access = 0xf3;
2672 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2673 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2674 RT_NOREF_PV(pVCpu);
2675 }
2676#else
2677 RT_NOREF(pVmcsInfo);
2678#endif
2679
2680 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2681 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2682               ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2683
2684 /*
2685 * Commit it to the VMCS.
2686 */
2687 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
2688 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
2689 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
2690 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
2691 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2692 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2693 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2694 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2695 return VINF_SUCCESS;
2696}
2697
2698
2699/**
2700 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2701 * area in the VMCS.
2702 *
2703 * @returns VBox status code.
2704 * @param pVCpu The cross context virtual CPU structure.
2705 * @param pVmxTransient The VMX-transient structure.
2706 *
2707 * @remarks Will import guest CR0 on strict builds during validation of
2708 * segments.
2709 * @remarks No-long-jump zone!!!
2710 */
2711static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2712{
2713 int rc = VERR_INTERNAL_ERROR_5;
2714#ifndef IN_NEM_DARWIN
2715 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2716#endif
2717 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2718 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2719#ifndef IN_NEM_DARWIN
2720 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2721#endif
2722
2723 /*
2724 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2725 */
2726 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2727 {
2728 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2729 {
2730 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2731#ifndef IN_NEM_DARWIN
2732 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2733 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2734#endif
2735 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2736 AssertRC(rc);
2737 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2738 }
2739
2740 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2741 {
2742 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2743#ifndef IN_NEM_DARWIN
2744 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2745 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2746#endif
2747 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2748 AssertRC(rc);
2749 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2750 }
2751
2752 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2753 {
2754 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2755#ifndef IN_NEM_DARWIN
2756 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2757 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2758#endif
2759 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2760 AssertRC(rc);
2761 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2762 }
2763
2764 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2765 {
2766 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2767#ifndef IN_NEM_DARWIN
2768 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2769 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2770#endif
2771 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2772 AssertRC(rc);
2773 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2774 }
2775
2776 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2777 {
2778 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2779#ifndef IN_NEM_DARWIN
2780 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2781 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2782#endif
2783 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2784 AssertRC(rc);
2785 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2786 }
2787
2788 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2789 {
2790 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2791#ifndef IN_NEM_DARWIN
2792 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2793 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2794#endif
2795 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2796 AssertRC(rc);
2797 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2798 }
2799
2800#ifdef VBOX_STRICT
2801 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2802#endif
2803 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2804 pCtx->cs.Attr.u));
2805 }
2806
2807 /*
2808 * Guest TR.
2809 */
2810 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2811 {
2812 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2813
2814 /*
2815 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2816 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2817 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2818 */
2819 uint16_t u16Sel;
2820 uint32_t u32Limit;
2821 uint64_t u64Base;
2822 uint32_t u32AccessRights;
2823#ifndef IN_NEM_DARWIN
2824 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2825#endif
2826 {
2827 u16Sel = pCtx->tr.Sel;
2828 u32Limit = pCtx->tr.u32Limit;
2829 u64Base = pCtx->tr.u64Base;
2830 u32AccessRights = pCtx->tr.Attr.u;
2831 }
2832#ifndef IN_NEM_DARWIN
2833 else
2834 {
2835 Assert(!pVmxTransient->fIsNestedGuest);
2836 Assert(pVM->hm.s.vmx.pRealModeTSS);
2837 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2838
2839 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2840 RTGCPHYS GCPhys;
2841 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2842 AssertRCReturn(rc, rc);
2843
2844 X86DESCATTR DescAttr;
2845 DescAttr.u = 0;
2846 DescAttr.n.u1Present = 1;
2847 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2848
2849 u16Sel = 0;
2850 u32Limit = HM_VTX_TSS_SIZE;
2851 u64Base = GCPhys;
2852 u32AccessRights = DescAttr.u;
2853 }
2854#endif
2855
2856 /* Validate. */
2857 Assert(!(u16Sel & RT_BIT(2)));
2858 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2859 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2860 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2861 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2862 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2863 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2864 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2865 Assert( (u32Limit & 0xfff) == 0xfff
2866 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2867 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2868 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2869
2870 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2871 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2872 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2873 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2874
2875 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2876 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2877 }
2878
2879 /*
2880 * Guest GDTR.
2881 */
2882 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2883 {
2884 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2885
2886 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2887 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2888
2889 /* Validate. */
2890 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2891
2892 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2893 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2894 }
2895
2896 /*
2897 * Guest LDTR.
2898 */
2899 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2900 {
2901 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2902
2903 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2904 uint32_t u32Access;
2905 if ( !pVmxTransient->fIsNestedGuest
2906 && !pCtx->ldtr.Attr.u)
2907 u32Access = X86DESCATTR_UNUSABLE;
2908 else
2909 u32Access = pCtx->ldtr.Attr.u;
2910
2911 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2912 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2913 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2914 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2915
2916 /* Validate. */
2917 if (!(u32Access & X86DESCATTR_UNUSABLE))
2918 {
2919 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2920 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2921 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2922 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2923 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2924 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2925 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2926 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2927 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2928 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2929 }
2930
2931 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2932 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2933 }
2934
2935 /*
2936 * Guest IDTR.
2937 */
2938 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2939 {
2940 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2941
2942 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2943 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2944
2945 /* Validate. */
2946 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2947
2948 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2949 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2950 }
2951
2952 return VINF_SUCCESS;
2953}
2954
2955
2956/**
2957 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2958 * VM-exit interruption info type.
2959 *
2960 * @returns The IEM exception flags.
2961 * @param uVector The event vector.
2962 * @param uVmxEventType The VMX event type.
2963 *
2964 * @remarks This function currently only constructs flags required for
2965 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2966 * and CR2 aspects of an exception are not included).
2967 */
2968static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2969{
2970 uint32_t fIemXcptFlags;
2971 switch (uVmxEventType)
2972 {
2973 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2974 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2975 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2976 break;
2977
2978 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2979 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2980 break;
2981
2982 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2983 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2984 break;
2985
2986 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2987 {
2988 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2989 if (uVector == X86_XCPT_BP)
2990 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2991 else if (uVector == X86_XCPT_OF)
2992 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
2993 else
2994 {
2995 fIemXcptFlags = 0;
2996 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
2997 }
2998 break;
2999 }
3000
3001 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
3002 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3003 break;
3004
3005 default:
3006 fIemXcptFlags = 0;
3007 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
3008 break;
3009 }
3010 return fIemXcptFlags;
3011}
3012
3013
3014/**
3015 * Sets an event as a pending event to be injected into the guest.
3016 *
3017 * @param pVCpu The cross context virtual CPU structure.
3018 * @param u32IntInfo The VM-entry interruption-information field.
3019 * @param cbInstr The VM-entry instruction length in bytes (for
3020 * software interrupts, exceptions and privileged
3021 * software exceptions).
3022 * @param u32ErrCode The VM-entry exception error code.
3023 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3024 * page-fault.
3025 */
3026DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3027 RTGCUINTPTR GCPtrFaultAddress)
3028{
3029 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3030 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3031 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3032 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3033 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3034 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3035}
3036
3037
3038/**
3039 * Sets an external interrupt as pending-for-injection into the VM.
3040 *
3041 * @param pVCpu The cross context virtual CPU structure.
3042 * @param u8Interrupt The external interrupt vector.
3043 */
3044DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3045{
3046 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3047 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3048 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3049 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3050 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3051}
3052
3053
3054/**
3055 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3056 *
3057 * @param pVCpu The cross context virtual CPU structure.
3058 */
3059DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3060{
3061 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3062 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3063 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3064 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3065 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3066}
3067
3068
3069/**
3070 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3071 *
3072 * @param pVCpu The cross context virtual CPU structure.
3073 */
3074DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3075{
3076 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3077 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3078 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3079 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3080 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3081}
3082
3083
3084/**
3085 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3086 *
3087 * @param pVCpu The cross context virtual CPU structure.
3088 */
3089DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3090{
3091 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3092 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3093 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3094 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3095 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3096}
3097
3098
3099/**
3100 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3101 *
3102 * @param pVCpu The cross context virtual CPU structure.
3103 */
3104DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3105{
3106 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3107 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3108 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3109 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3110 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3111}
3112
3113
3114#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3115/**
3116 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3117 *
3118 * @param pVCpu The cross context virtual CPU structure.
3119 * @param u32ErrCode The error code for the general-protection exception.
3120 */
3121DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3122{
3123 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3124 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3125 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3126 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3127 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3128}
3129
3130
3131/**
3132 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3133 *
3134 * @param pVCpu The cross context virtual CPU structure.
3135 * @param u32ErrCode The error code for the stack exception.
3136 */
3137DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3138{
3139 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3140 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3141 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3142 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3143 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3144}
3145#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3146
3147
3148/**
3149 * Fixes up attributes for the specified segment register.
3150 *
3151 * @param pVCpu The cross context virtual CPU structure.
3152 * @param pSelReg The segment register that needs fixing.
3153 * @param pszRegName The register name (for logging and assertions).
3154 */
3155static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3156{
3157 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3158
3159 /*
3160 * If VT-x marks the segment as unusable, most other bits remain undefined:
3161 * - For CS the L, D and G bits have meaning.
3162 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3163 * - For the remaining data segments no bits are defined.
3164 *
3165 * The present bit and the unusable bit have been observed to be set at the
3166 * same time (the selector was supposed to be invalid as we started executing
3167 * a V8086 interrupt in ring-0).
3168 *
3169 * What should be important for the rest of the VBox code is that the P bit is
3170 * cleared. Some of the other VBox code recognizes the unusable bit, but
3171 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3172 * safe side here, we'll strip off P and other bits we don't care about. If
3173 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3174 *
3175 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3176 */
3177#ifdef VBOX_STRICT
3178 uint32_t const uAttr = pSelReg->Attr.u;
3179#endif
3180
3181 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3182 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3183 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3184
3185#ifdef VBOX_STRICT
3186# ifndef IN_NEM_DARWIN
3187 VMMRZCallRing3Disable(pVCpu);
3188# endif
3189 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3190# ifdef DEBUG_bird
3191 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3192 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3193 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3194# endif
3195# ifndef IN_NEM_DARWIN
3196 VMMRZCallRing3Enable(pVCpu);
3197# endif
3198 NOREF(uAttr);
3199#endif
3200 RT_NOREF2(pVCpu, pszRegName);
3201}
3202
3203
3204/**
3205 * Imports a guest segment register from the current VMCS into the guest-CPU
3206 * context.
3207 *
3208 * @param pVCpu The cross context virtual CPU structure.
3209 * @param iSegReg The segment register number (X86_SREG_XXX).
3210 *
3211 * @remarks Called with interrupts and/or preemption disabled.
3212 */
3213static void vmxHCImportGuestSegReg(PVMCPUCC pVCpu, uint32_t iSegReg)
3214{
3215 Assert(iSegReg < X86_SREG_COUNT);
3216 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
3217 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
3218 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
3219 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
3220
3221 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
3222
3223 uint16_t u16Sel;
3224 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), &u16Sel); AssertRC(rc);
3225 pSelReg->Sel = u16Sel;
3226 pSelReg->ValidSel = u16Sel;
3227
3228 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3229 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), &pSelReg->u64Base); AssertRC(rc);
3230
3231 uint32_t u32Attr;
3232 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), &u32Attr); AssertRC(rc);
3233 pSelReg->Attr.u = u32Attr;
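    /* Note: "ES\0CS\0SS\0DS\0FS\0GS" below packs the six segment-register names into one string;
       indexing it at iSegReg * 3 yields the NUL-terminated name passed for logging/assertions. */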
3234 if (u32Attr & X86DESCATTR_UNUSABLE)
3235 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + iSegReg * 3);
3236
3237 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3238}
3239
3240
3241/**
3242 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3243 *
3244 * @param pVCpu The cross context virtual CPU structure.
3245 *
3246 * @remarks Called with interrupts and/or preemption disabled.
3247 */
3248static void vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3249{
3250 uint16_t u16Sel;
3251 uint64_t u64Base;
3252 uint32_t u32Limit, u32Attr;
3253 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3254 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3255 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3256 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3257
3258 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3259 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3260 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3261 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3262 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3263 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3264 if (u32Attr & X86DESCATTR_UNUSABLE)
3265 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3266}
3267
3268
3269/**
3270 * Imports the guest TR from the current VMCS into the guest-CPU context.
3271 *
3272 * @param pVCpu The cross context virtual CPU structure.
3273 *
3274 * @remarks Called with interrupts and/or preemption disabled.
3275 */
3276static void vmxHCImportGuestTr(PVMCPUCC pVCpu)
3277{
3278 uint16_t u16Sel;
3279 uint64_t u64Base;
3280 uint32_t u32Limit, u32Attr;
3281 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3282 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3283 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3284 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3285
3286 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3287 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3288 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3289 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3290 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3291 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3292 /* TR is the only selector that can never be unusable. */
3293 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3294}
3295
3296
3297/**
3298 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3299 *
3300 * @param pVCpu The cross context virtual CPU structure.
3301 *
3302 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3303 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3304 * instead!!!
3305 */
3306static void vmxHCImportGuestRip(PVMCPUCC pVCpu)
3307{
3308 uint64_t u64Val;
3309 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3310 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
3311 {
3312 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3313 AssertRC(rc);
3314
3315 pCtx->rip = u64Val;
3316 EMHistoryUpdatePC(pVCpu, pCtx->rip, false);
3317 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
3318 }
3319}
3320
3321
3322/**
3323 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3324 *
3325 * @param pVCpu The cross context virtual CPU structure.
3326 * @param pVmcsInfo The VMCS info. object.
3327 *
3328 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3329 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3330 * instead!!!
3331 */
3332static void vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3333{
3334 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3335 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
3336 {
3337 uint64_t u64Val;
3338 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
3339 AssertRC(rc);
3340
3341 pCtx->rflags.u64 = u64Val;
3342#ifndef IN_NEM_DARWIN
3343 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3344 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
3345 {
3346 pCtx->eflags.Bits.u1VM = 0;
3347 pCtx->eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3348 }
3349#else
3350 RT_NOREF(pVmcsInfo);
3351#endif
3352 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3353 }
3354}
3355
3356
3357/**
3358 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3359 * context.
3360 *
3361 * @param pVCpu The cross context virtual CPU structure.
3362 * @param pVmcsInfo The VMCS info. object.
3363 *
3364 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3365 * do not log!
3366 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3367 * instead!!!
3368 */
3369static void vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3370{
3371 uint32_t u32Val;
3372 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3373 if (!u32Val)
3374 {
3375 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3376 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3377 CPUMSetGuestNmiBlocking(pVCpu, false);
3378 }
3379 else
3380 {
3381 /*
3382 * We must import RIP here to set our EM interrupt-inhibited state.
3383 * We also import RFLAGS as our code that evaluates pending interrupts
3384 * before VM-entry requires it.
3385 */
3386 vmxHCImportGuestRip(pVCpu);
3387 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3388
3389 if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3390 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
3391 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3392 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3393
3394 bool const fNmiBlocking = RT_BOOL(u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
3395 CPUMSetGuestNmiBlocking(pVCpu, fNmiBlocking);
3396 }
3397}
3398
3399
3400/**
3401 * Worker for VMXR0ImportStateOnDemand.
3402 *
3403 * @returns VBox status code.
3404 * @param pVCpu The cross context virtual CPU structure.
3405 * @param pVmcsInfo The VMCS info. object.
3406 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3407 */
3408static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3409{
3410 int rc = VINF_SUCCESS;
3411 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3412 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3413 uint32_t u32Val;
3414
3415 /*
3416 * Note! This is a hack to work around a mysterious BSOD observed with release builds
3417 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3418 * neither are other host platforms.
3419 *
3420 * Committing this temporarily as it prevents BSOD.
3421 *
3422 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3423 */
3424# ifdef RT_OS_WINDOWS
3425 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3426 return VERR_HM_IPE_1;
3427# endif
3428
3429 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3430
3431#ifndef IN_NEM_DARWIN
3432 /*
3433 * We disable interrupts to make the updating of the state and in particular
3434 * the fExtrn modification atomic wrt to preemption hooks.
3435 */
3436 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3437#endif
3438
3439 fWhat &= pCtx->fExtrn;
3440 if (fWhat)
3441 {
3442 do
3443 {
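        /* The do/while(0) construct lets the nested-guest VMCS-shadow copy below (see the
           CPUMCTX_EXTRN_HWVIRT handling) bail out of the import with a simple 'break' on failure. */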
3444 if (fWhat & CPUMCTX_EXTRN_RIP)
3445 vmxHCImportGuestRip(pVCpu);
3446
3447 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3448 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3449
3450 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3451 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3452
3453 if (fWhat & CPUMCTX_EXTRN_RSP)
3454 {
3455 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3456 AssertRC(rc);
3457 }
3458
3459 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3460 {
3461 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3462#ifndef IN_NEM_DARWIN
3463 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3464#else
3465 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3466#endif
3467 if (fWhat & CPUMCTX_EXTRN_CS)
3468 {
3469 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
3470 vmxHCImportGuestRip(pVCpu);
3471 if (fRealOnV86Active)
3472 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3473 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3474 }
3475 if (fWhat & CPUMCTX_EXTRN_SS)
3476 {
3477 vmxHCImportGuestSegReg(pVCpu, X86_SREG_SS);
3478 if (fRealOnV86Active)
3479 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3480 }
3481 if (fWhat & CPUMCTX_EXTRN_DS)
3482 {
3483 vmxHCImportGuestSegReg(pVCpu, X86_SREG_DS);
3484 if (fRealOnV86Active)
3485 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3486 }
3487 if (fWhat & CPUMCTX_EXTRN_ES)
3488 {
3489 vmxHCImportGuestSegReg(pVCpu, X86_SREG_ES);
3490 if (fRealOnV86Active)
3491 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3492 }
3493 if (fWhat & CPUMCTX_EXTRN_FS)
3494 {
3495 vmxHCImportGuestSegReg(pVCpu, X86_SREG_FS);
3496 if (fRealOnV86Active)
3497 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3498 }
3499 if (fWhat & CPUMCTX_EXTRN_GS)
3500 {
3501 vmxHCImportGuestSegReg(pVCpu, X86_SREG_GS);
3502 if (fRealOnV86Active)
3503 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3504 }
3505 }
3506
3507 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3508 {
3509 if (fWhat & CPUMCTX_EXTRN_LDTR)
3510 vmxHCImportGuestLdtr(pVCpu);
3511
3512 if (fWhat & CPUMCTX_EXTRN_GDTR)
3513 {
3514 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3515 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3516 pCtx->gdtr.cbGdt = u32Val;
3517 }
3518
3519 /* Guest IDTR. */
3520 if (fWhat & CPUMCTX_EXTRN_IDTR)
3521 {
3522 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3523 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3524 pCtx->idtr.cbIdt = u32Val;
3525 }
3526
3527 /* Guest TR. */
3528 if (fWhat & CPUMCTX_EXTRN_TR)
3529 {
3530#ifndef IN_NEM_DARWIN
3531 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3532 so we don't need to import that one. */
3533 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3534#endif
3535 vmxHCImportGuestTr(pVCpu);
3536 }
3537 }
3538
3539 if (fWhat & CPUMCTX_EXTRN_DR7)
3540 {
3541#ifndef IN_NEM_DARWIN
3542 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3543#endif
3544 {
3545 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3546 AssertRC(rc);
3547 }
3548 }
3549
3550 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3551 {
3552 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3553 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3554 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3555 pCtx->SysEnter.cs = u32Val;
3556 }
3557
3558#ifndef IN_NEM_DARWIN
3559 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3560 {
3561 if ( pVM->hmr0.s.fAllow64BitGuests
3562 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3563 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3564 }
3565
3566 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3567 {
3568 if ( pVM->hmr0.s.fAllow64BitGuests
3569 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3570 {
3571 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3572 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3573 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3574 }
3575 }
3576
3577 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3578 {
3579 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3580 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3581 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3582 Assert(pMsrs);
3583 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3584 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
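            /* Walk the VM-exit MSR-store area which the CPU filled in on VM-exit and
               sync each auto-stored MSR back into the guest-CPU context. */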
3585 for (uint32_t i = 0; i < cMsrs; i++)
3586 {
3587 uint32_t const idMsr = pMsrs[i].u32Msr;
3588 switch (idMsr)
3589 {
3590 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3591 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3592 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3593 default:
3594 {
3595 uint32_t idxLbrMsr;
3596 if (VM_IS_VMX_LBR(pVM))
3597 {
3598 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3599 {
3600 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3601 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3602 break;
3603 }
3604 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3605 {
3606 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3607 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3608 break;
3609 }
3610 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3611 {
3612 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3613 break;
3614 }
3615 /* Fallthru (no break) */
3616 }
3617 pCtx->fExtrn = 0;
3618 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3619 ASMSetFlags(fEFlags);
3620 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3621 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3622 }
3623 }
3624 }
3625 }
3626#endif
3627
3628 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3629 {
3630 if (fWhat & CPUMCTX_EXTRN_CR0)
3631 {
3632 uint64_t u64Cr0;
3633 uint64_t u64Shadow;
3634 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3635 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3636#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3637 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3638 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3639#else
3640 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3641 {
3642 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3643 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3644 }
3645 else
3646 {
3647 /*
3648 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3649 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3650 * re-construct CR0. See @bugref{9180#c95} for details.
3651 */
3652 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3653 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3654 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3655 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3656 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3657 }
3658#endif
3659#ifndef IN_NEM_DARWIN
3660 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3661#endif
3662 CPUMSetGuestCR0(pVCpu, u64Cr0);
3663#ifndef IN_NEM_DARWIN
3664 VMMRZCallRing3Enable(pVCpu);
3665#endif
3666 }
3667
3668 if (fWhat & CPUMCTX_EXTRN_CR4)
3669 {
3670 uint64_t u64Cr4;
3671 uint64_t u64Shadow;
3672 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3673 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3674#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3675 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3676 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3677#else
3678 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3679 {
3680 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3681 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3682 }
3683 else
3684 {
3685 /*
3686 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3687 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3688 * re-construct CR4. See @bugref{9180#c95} for details.
3689 */
3690 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3691 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3692 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3693 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3694 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3695 }
3696#endif
3697 pCtx->cr4 = u64Cr4;
3698 }
3699
3700 if (fWhat & CPUMCTX_EXTRN_CR3)
3701 {
3702 /* CR0.PG bit changes are always intercepted, so it's up to date. */
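                /* Note: in the remaining configurations the CR3 field in the VMCS does not hold the
                   guest's own value (e.g. without nested paging it is the shadow page-table root),
                   so there is nothing to import here. */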
3703 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3704 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3705 && CPUMIsGuestPagingEnabledEx(pCtx)))
3706 {
3707 uint64_t u64Cr3;
3708 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3709 if (pCtx->cr3 != u64Cr3)
3710 {
3711 pCtx->cr3 = u64Cr3;
3712 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3713 }
3714
3715 /*
3716 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3717 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3718 */
3719 if (CPUMIsGuestInPAEModeEx(pCtx))
3720 {
3721 X86PDPE aPaePdpes[4];
3722 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3723 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3724 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3725 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3726 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3727 {
3728 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3729 /* PGM now updates PAE PDPTEs while updating CR3. */
3730 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3731 }
3732 }
3733 }
3734 }
3735 }
3736
3737#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3738 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3739 {
3740 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3741 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3742 {
3743 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3744 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3745 if (RT_SUCCESS(rc))
3746 { /* likely */ }
3747 else
3748 break;
3749 }
3750 }
3751#endif
3752 } while (0);
3753
3754 if (RT_SUCCESS(rc))
3755 {
3756 /* Update fExtrn. */
3757 pCtx->fExtrn &= ~fWhat;
3758
3759 /* If everything has been imported, clear the HM keeper bit. */
3760 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3761 {
3762#ifndef IN_NEM_DARWIN
3763 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3764#else
3765 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3766#endif
3767 Assert(!pCtx->fExtrn);
3768 }
3769 }
3770 }
3771#ifndef IN_NEM_DARWIN
3772 else
3773 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3774
3775 /*
3776 * Restore interrupts.
3777 */
3778 ASMSetFlags(fEFlags);
3779#endif
3780
3781 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3782
3783 if (RT_SUCCESS(rc))
3784 { /* likely */ }
3785 else
3786 return rc;
3787
3788 /*
3789 * Honor any pending CR3 updates.
3790 *
3791 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3792 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3793 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3794 *
3795 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3796 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3797 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3798 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3799 *
3800 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3801 *
3802 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3803 */
3804 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3805#ifndef IN_NEM_DARWIN
3806 && VMMRZCallRing3IsEnabled(pVCpu)
3807#endif
3808 )
3809 {
3810 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3811 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3812 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3813 }
3814
3815 return VINF_SUCCESS;
3816}
3817
3818
3819/**
3820 * Check per-VM and per-VCPU force flag actions that require us to go back to
3821 * ring-3 for one reason or another.
3822 *
3823 * @returns Strict VBox status code (i.e. informational status codes too)
3824 * @retval VINF_SUCCESS if we don't have any actions that require going back to
3825 * ring-3.
3826 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
3827 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
3828 * interrupts)
3829 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
3830 * all EMTs to be in ring-3.
3831 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
3832 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
3833 * to the EM loop.
3834 *
3835 * @param pVCpu The cross context virtual CPU structure.
3836 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
3837 * @param fStepping Whether we are single-stepping the guest using the
3838 * hypervisor debugger.
3839 *
3840 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
3841 * is no longer in VMX non-root mode.
3842 */
3843static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
3844{
3845#ifndef IN_NEM_DARWIN
3846 Assert(VMMRZCallRing3IsEnabled(pVCpu));
3847#endif
3848
3849 /*
3850 * Update pending interrupts into the APIC's IRR.
3851 */
3852 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3853 APICUpdatePendingInterrupts(pVCpu);
3854
3855 /*
3856 * Anything pending? Should be more likely than not if we're doing a good job.
3857 */
3858 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3859 if ( !fStepping
3860 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
3861 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
3862 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
3863 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
3864 return VINF_SUCCESS;
3865
3866 /* Pending PGM CR3 sync. */
3867 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
3868 {
3869 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3870 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
3871 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
3872 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
3873 if (rcStrict != VINF_SUCCESS)
3874 {
3875 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
3876 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
3877 return rcStrict;
3878 }
3879 }
3880
3881 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
3882 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
3883 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
3884 {
3885 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
3886 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
3887 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
3888 return rc;
3889 }
3890
3891 /* Pending VM request packets, such as hardware interrupts. */
3892 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
3893 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
3894 {
3895 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
3896 Log4Func(("Pending VM request forcing us back to ring-3\n"));
3897 return VINF_EM_PENDING_REQUEST;
3898 }
3899
3900 /* Pending PGM pool flushes. */
3901 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
3902 {
3903 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
3904 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
3905 return VINF_PGM_POOL_FLUSH_PENDING;
3906 }
3907
3908 /* Pending DMA requests. */
3909 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
3910 {
3911 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
3912 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
3913 return VINF_EM_RAW_TO_R3;
3914 }
3915
3916#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3917 /*
3918 * Pending nested-guest events.
3919 *
3920 * Please note that the priority of these events is specified by the architecture and is important.
3921 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
3922 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
3923 */
3924 if (fIsNestedGuest)
3925 {
3926 /* Pending nested-guest APIC-write. */
3927 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
3928 {
3929 Log4Func(("Pending nested-guest APIC-write\n"));
3930 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
3931 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3932 return rcStrict;
3933 }
3934
3935 /* Pending nested-guest monitor-trap flag (MTF). */
3936 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
3937 {
3938 Log4Func(("Pending nested-guest MTF\n"));
3939 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
3940 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3941 return rcStrict;
3942 }
3943
3944 /* Pending nested-guest VMX-preemption timer expired. */
3945 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
3946 {
3947 Log4Func(("Pending nested-guest preempt timer\n"));
3948 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
3949 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3950 return rcStrict;
3951 }
3952 }
3953#else
3954 NOREF(fIsNestedGuest);
3955#endif
3956
3957 return VINF_SUCCESS;
3958}
3959
3960
3961/**
3962 * Converts any TRPM trap into a pending HM event. This is typically used when
3963 * entering from ring-3 (not longjmp returns).
3964 *
3965 * @param pVCpu The cross context virtual CPU structure.
3966 */
3967static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
3968{
3969 Assert(TRPMHasTrap(pVCpu));
3970 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3971
3972 uint8_t uVector;
3973 TRPMEVENT enmTrpmEvent;
3974 uint32_t uErrCode;
3975 RTGCUINTPTR GCPtrFaultAddress;
3976 uint8_t cbInstr;
3977 bool fIcebp;
3978
3979 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
3980 AssertRC(rc);
3981
3982 uint32_t u32IntInfo;
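    /* Build the interruption-information value: vector, valid bit, and the event type
       (HMTrpmEventTypeToVmxEventType is expected to return the type bits already shifted
       into the interruption-info format, so it can simply be OR'ed in). */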
3983 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
3984 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
3985
3986 rc = TRPMResetTrap(pVCpu);
3987 AssertRC(rc);
3988 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
3989 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
3990
3991 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
3992}
3993
3994
3995/**
3996 * Converts the pending HM event into a TRPM trap.
3997 *
3998 * @param pVCpu The cross context virtual CPU structure.
3999 */
4000static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4001{
4002 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4003
4004 /* If a trap was already pending, we did something wrong! */
4005 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4006
4007 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4008 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4009 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4010
4011 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4012
4013 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4014 AssertRC(rc);
4015
4016 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4017 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4018
4019 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4020 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4021 else
4022 {
4023 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4024 switch (uVectorType)
4025 {
4026 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4027 TRPMSetTrapDueToIcebp(pVCpu);
4028 RT_FALL_THRU();
4029 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4030 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4031 {
4032 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4033 || ( uVector == X86_XCPT_BP /* INT3 */
4034 || uVector == X86_XCPT_OF /* INTO */
4035 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4036 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4037 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4038 break;
4039 }
4040 }
4041 }
4042
4043 /* We're now done converting the pending event. */
4044 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4045}
4046
4047
4048/**
4049 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4050 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4051 *
4052 * @param pVCpu The cross context virtual CPU structure.
4053 * @param pVmcsInfo The VMCS info. object.
4054 */
4055static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4056{
4057 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4058 {
4059 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4060 {
4061 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4062 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4063 AssertRC(rc);
4064 }
4065 } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4066}
4067
4068
4069/**
4070 * Clears the interrupt-window exiting control in the VMCS.
4071 *
4072 * @param pVCpu The cross context virtual CPU structure.
4073 * @param pVmcsInfo The VMCS info. object.
4074 */
4075DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4076{
4077 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4078 {
4079 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4080 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4081 AssertRC(rc);
4082 }
4083}
4084
4085
4086/**
4087 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4088 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4089 *
4090 * @param pVCpu The cross context virtual CPU structure.
4091 * @param pVmcsInfo The VMCS info. object.
4092 */
4093static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4094{
4095 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4096 {
4097 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4098 {
4099 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4100 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4101 AssertRC(rc);
4102 Log4Func(("Setup NMI-window exiting\n"));
4103 }
4104 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4105}
4106
4107
4108/**
4109 * Clears the NMI-window exiting control in the VMCS.
4110 *
4111 * @param pVCpu The cross context virtual CPU structure.
4112 * @param pVmcsInfo The VMCS info. object.
4113 */
4114DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4115{
4116 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4117 {
4118 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4119 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4120 AssertRC(rc);
4121 }
4122}
4123
4124
4125/**
4126 * Injects an event into the guest upon VM-entry by updating the relevant fields
4127 * in the VM-entry area in the VMCS.
4128 *
4129 * @returns Strict VBox status code (i.e. informational status codes too).
4130 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4131 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4132 *
4133 * @param pVCpu The cross context virtual CPU structure.
4134 * @param pVmcsInfo The VMCS info. object.
 * @param fIsNestedGuest Flag whether the event is injected into a nested guest.
4135 * @param pEvent The event being injected.
4136 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4137 * will be updated if necessary. This cannot be NULL.
4138 * @param fStepping Whether we're single-stepping guest execution and should
4139 * return VINF_EM_DBG_STEPPED if the event is injected
4140 * directly (registers modified by us, not by hardware on
4141 * VM-entry).
4142 */
4143static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent, bool fStepping,
4144 uint32_t *pfIntrState)
4145{
4146 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4147 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4148 Assert(pfIntrState);
4149
4150#ifdef IN_NEM_DARWIN
4151 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4152#endif
4153
4154 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4155 uint32_t u32IntInfo = pEvent->u64IntInfo;
4156 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4157 uint32_t const cbInstr = pEvent->cbInstr;
4158 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4159 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4160 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4161
4162#ifdef VBOX_STRICT
4163 /*
4164 * Validate the error-code-valid bit for hardware exceptions.
4165 * No error codes for exceptions in real-mode.
4166 *
4167 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4168 */
4169 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4170 && !CPUMIsGuestInRealModeEx(pCtx))
4171 {
4172 switch (uVector)
4173 {
4174 case X86_XCPT_PF:
4175 case X86_XCPT_DF:
4176 case X86_XCPT_TS:
4177 case X86_XCPT_NP:
4178 case X86_XCPT_SS:
4179 case X86_XCPT_GP:
4180 case X86_XCPT_AC:
4181 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4182 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4183 RT_FALL_THRU();
4184 default:
4185 break;
4186 }
4187 }
4188
4189 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4190 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4191 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4192#endif
4193
4194 RT_NOREF(uVector);
4195 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4196 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4197 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4198 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4199 {
4200 Assert(uVector <= X86_XCPT_LAST);
4201 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4202 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4203 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4204 }
4205 else
4206 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4207
4208 /*
4209 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4210 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4211 * interrupt handler in the (real-mode) guest.
4212 *
4213 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4214 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4215 */
4216 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4217 {
4218#ifndef IN_NEM_DARWIN
4219 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4220#endif
4221 {
4222 /*
4223 * For CPUs with unrestricted guest execution enabled and with the guest
4224 * in real-mode, we must not set the deliver-error-code bit.
4225 *
4226 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4227 */
4228 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4229 }
4230#ifndef IN_NEM_DARWIN
4231 else
4232 {
4233 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4234 Assert(PDMVmmDevHeapIsEnabled(pVM));
4235 Assert(pVM->hm.s.vmx.pRealModeTSS);
4236 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4237
4238 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4239 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4240 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4241 AssertRCReturn(rc2, rc2);
4242
4243 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4244 size_t const cbIdtEntry = sizeof(X86IDTR16);
4245 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4246 {
4247 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4248 if (uVector == X86_XCPT_DF)
4249 return VINF_EM_RESET;
4250
4251 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4252 No error codes for exceptions in real-mode. */
4253 if (uVector == X86_XCPT_GP)
4254 {
4255 uint32_t const uXcptDfInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4256 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4257 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4258 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4259 HMEVENT EventXcptDf;
4260 RT_ZERO(EventXcptDf);
4261 EventXcptDf.u64IntInfo = uXcptDfInfo;
4262 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptDf, fStepping, pfIntrState);
4263 }
4264
4265 /*
4266 * If we're injecting an event with no valid IDT entry, inject a #GP.
4267 * No error codes for exceptions in real-mode.
4268 *
4269 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4270 */
4271 uint32_t const uXcptGpInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4272 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4273 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4274 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4275 HMEVENT EventXcptGp;
4276 RT_ZERO(EventXcptGp);
4277 EventXcptGp.u64IntInfo = uXcptGpInfo;
4278 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptGp, fStepping, pfIntrState);
4279 }
4280
4281 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4282 uint16_t uGuestIp = pCtx->ip;
4283 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4284 {
4285 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4286 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
4287 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4288 }
4289 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4290 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4291
4292 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4293 X86IDTR16 IdtEntry;
4294 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4295 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4296 AssertRCReturn(rc2, rc2);
4297
4298 /* Construct the stack frame for the interrupt/exception handler. */
4299 VBOXSTRICTRC rcStrict;
4300 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
4301 if (rcStrict == VINF_SUCCESS)
4302 {
4303 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4304 if (rcStrict == VINF_SUCCESS)
4305 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4306 }
4307
4308 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4309 if (rcStrict == VINF_SUCCESS)
4310 {
4311 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4312 pCtx->rip = IdtEntry.offSel;
4313 pCtx->cs.Sel = IdtEntry.uSel;
4314 pCtx->cs.ValidSel = IdtEntry.uSel;
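                /* Real-mode segment base = selector * 16; note the shift count is cbIdtEntry,
                   which happens to be 4 (sizeof(X86IDTR16)). */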
4315 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4316 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4317 && uVector == X86_XCPT_PF)
4318 pCtx->cr2 = GCPtrFault;
4319
4320 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4321 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4322 | HM_CHANGED_GUEST_RSP);
4323
4324 /*
4325 * If we delivered a hardware exception (other than an NMI) and if there was
4326 * block-by-STI in effect, we should clear it.
4327 */
4328 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4329 {
4330 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4331 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4332 Log4Func(("Clearing inhibition due to STI\n"));
4333 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4334 }
4335
4336 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4337 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4338
4339 /*
4340 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4341 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4342 */
4343 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4344
4345 /*
4346 * If we eventually support nested-guest execution without unrestricted guest execution,
4347 * we should set fInterceptEvents here.
4348 */
4349 Assert(!fIsNestedGuest);
4350
4351 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4352 if (fStepping)
4353 rcStrict = VINF_EM_DBG_STEPPED;
4354 }
4355 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4356 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4357 return rcStrict;
4358 }
4359#else
4360 RT_NOREF(pVmcsInfo);
4361#endif
4362 }
4363
4364 /*
4365 * Validate.
4366 */
4367 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4368 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4369
4370 /*
4371 * Inject the event into the VMCS.
4372 */
4373 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4374 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4375 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4376 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4377 AssertRC(rc);
4378
4379 /*
4380 * Update guest CR2 if this is a page-fault.
4381 */
4382 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4383 pCtx->cr2 = GCPtrFault;
4384
4385 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4386 return VINF_SUCCESS;
4387}
4388
4389
4390/**
4391 * Evaluates the event to be delivered to the guest and sets it as the pending
4392 * event.
4393 *
4394 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4395 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4396 * NOT restore these force-flags.
4397 *
4398 * @returns Strict VBox status code (i.e. informational status codes too).
4399 * @param pVCpu The cross context virtual CPU structure.
4400 * @param pVmcsInfo The VMCS information structure.
4401 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
4402 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4403 */
4404static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4405{
4406 Assert(pfIntrState);
4407 Assert(!TRPMHasTrap(pVCpu));
4408
4409 /*
4410 * Compute/update guest-interruptibility state related FFs.
4411 * The FFs will be used below while evaluating events to be injected.
4412 */
4413 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4414
4415 /*
4416 * Evaluate if a new event needs to be injected.
4417 * An event that's already pending has already performed all necessary checks.
4418 */
4419 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4420 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
4421 {
4422 /** @todo SMI. SMIs take priority over NMIs. */
4423
4424 /*
4425 * NMIs.
4426 * NMIs take priority over external interrupts.
4427 */
4428#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4429 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4430#endif
4431 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4432 {
4433 /*
4434 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4435 *
4436 * For a nested-guest, the FF always indicates the outer guest's ability to
4437 * receive an NMI while the guest-interruptibility state bit depends on whether
4438 * the nested-hypervisor is using virtual-NMIs.
4439 */
4440 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
4441 {
4442#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4443 if ( fIsNestedGuest
4444 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4445 return IEMExecVmxVmexitXcptNmi(pVCpu);
4446#endif
4447 vmxHCSetPendingXcptNmi(pVCpu);
4448 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4449 Log4Func(("NMI pending injection\n"));
4450
4451 /* We've injected the NMI, bail. */
4452 return VINF_SUCCESS;
4453 }
4454 else if (!fIsNestedGuest)
4455 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4456 }
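         /*
          * Note: when block-by-NMI is set we cannot inject here; the NMI-window exiting control
          * requested above (for non-nested guests) causes a VM-exit as soon as the blocking ends,
          * at which point this function runs again and injects the pending NMI.
          */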
4457
4458 /*
4459 * External interrupts (PIC/APIC).
4460 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4461 * We cannot re-request the interrupt from the controller.
4462 */
4463 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4464 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4465 {
4466 Assert(!DBGFIsStepping(pVCpu));
4467 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4468 AssertRC(rc);
4469
4470 /*
4471 * We must not check EFLAGS directly when executing a nested-guest; use
4472 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
4473 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4474 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4475 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4476 *
4477 * See Intel spec. 25.4.1 "Event Blocking".
4478 */
4479 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4480 {
4481#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4482 if ( fIsNestedGuest
4483 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4484 {
4485 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4486 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4487 return rcStrict;
4488 }
4489#endif
4490 uint8_t u8Interrupt;
4491 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4492 if (RT_SUCCESS(rc))
4493 {
4494#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4495 if ( fIsNestedGuest
4496 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4497 {
4498 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
4499 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4500 return rcStrict;
4501 }
4502#endif
4503 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4504 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
4505 }
4506 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4507 {
4508 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
4509
4510 if ( !fIsNestedGuest
4511 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
4512 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
4513 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
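                /*
                 * Note: the TPR threshold is compared against the interrupt's priority class,
                 * i.e. the upper nibble of its vector (hence u8Interrupt >> 4), so a
                 * TPR-below-threshold VM-exit occurs once the guest lowers the TPR far enough
                 * for this interrupt to become deliverable.
                 */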
4514
4515 /*
4516 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4517 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4518 * need to re-set this force-flag here.
4519 */
4520 }
4521 else
4522 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4523
4524 /* We've injected the interrupt or taken necessary action, bail. */
4525 return VINF_SUCCESS;
4526 }
4527 if (!fIsNestedGuest)
4528 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4529 }
4530 }
4531 else if (!fIsNestedGuest)
4532 {
4533 /*
4534 * An event is being injected or we are in an interrupt shadow. Check if another event is
4535 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
4536 * the pending event.
4537 */
4538 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4539 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4540 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4541 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4542 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4543 }
4544 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
4545
4546 return VINF_SUCCESS;
4547}
4548
4549
4550/**
4551 * Injects any pending events into the guest if the guest is in a state to
4552 * receive them.
4553 *
4554 * @returns Strict VBox status code (i.e. informational status codes too).
4555 * @param pVCpu The cross context virtual CPU structure.
 * @param pVmcsInfo The VMCS information structure.
4556 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
4557 * @param fIntrState The VT-x guest-interruptibility state.
4558 * @param fStepping Whether we are single-stepping the guest using the
4559 * hypervisor debugger and should return
4560 * VINF_EM_DBG_STEPPED if the event was dispatched
4561 * directly.
4562 */
4563static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t fIntrState, bool fStepping)
4564{
4565 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
4566#ifndef IN_NEM_DARWIN
4567 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4568#endif
4569
4570#ifdef VBOX_STRICT
4571 /*
4572 * Verify guest-interruptibility state.
4573 *
4574 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
4575 * since injecting an event may modify the interruptibility state and we must thus always
4576 * use fIntrState.
4577 */
4578 {
4579 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
4580 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
4581 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
4582 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
4583 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
4584 Assert(!TRPMHasTrap(pVCpu));
4585 NOREF(fBlockMovSS); NOREF(fBlockSti);
4586 }
4587#endif
4588
4589 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4590 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
4591 {
4592 /*
4593 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
4594 * pending even while injecting an event and in this case, we want a VM-exit as soon as
4595 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
4596 *
4597 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
4598 */
4599 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
4600#ifdef VBOX_STRICT
4601 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4602 {
4603 Assert(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
4604 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4605 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4606 }
4607 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
4608 {
4609 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
4610 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4611 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4612 }
4613#endif
4614 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
4615 uIntType));
4616
4617 /*
4618 * Inject the event and get any changes to the guest-interruptibility state.
4619 *
4620 * The guest-interruptibility state may need to be updated if we inject the event
4621 * into the guest IDT ourselves (for real-on-v86 guests injecting software interrupts).
4622 */
4623 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
4624 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
4625
4626 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4627 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
4628 else
4629 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
4630 }
4631
4632 /*
4633 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
4634 * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
4635 */
4636 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
4637 && !fIsNestedGuest)
4638 {
4639 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
4640
4641 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4642 {
4643 /*
4644 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
4645 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
4646 */
4647 Assert(!DBGFIsStepping(pVCpu));
4648 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_TF);
4649 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
4650 AssertRC(rc);
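            /* Note: the shift places the flag in bit 14 of the pending debug exceptions field,
               the single-step (BS) bit, so the guest takes the expected #DB on the next
               instruction when EFLAGS.TF is set. */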
4651 }
4652 else
4653 {
4654 /*
4655 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
4656 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
4657 * we take care of this case in vmxHCExportSharedDebugState and also the case where
4658 * we use MTF, so just make sure it's called before executing guest-code.
4659 */
4660 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
4661 }
4662 }
4663 /* else: for nested-guests, this is currently handled while merging controls. */
4664
4665 /*
4666 * Finally, update the guest-interruptibility state.
4667 *
4668 * This is required for the real-on-v86 software interrupt injection, for
4669 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
4670 */
4671 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
4672 AssertRC(rc);
4673
4674 /*
4675 * There's no need to clear the VM-entry interruption-information field here if we're not
4676 * injecting anything. VT-x clears the valid bit on every VM-exit.
4677 *
4678 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
4679 */
4680
4681 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
4682 return rcStrict;
4683}
4684
4685
4686/**
4687 * Tries to determine what part of the guest-state VT-x has deemed invalid
4688 * and updates the error record fields accordingly.
4689 *
4690 * @returns VMX_IGS_* error codes.
4691 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
4692 * wrong with the guest state.
4693 *
4694 * @param pVCpu The cross context virtual CPU structure.
4695 * @param pVmcsInfo The VMCS info. object.
4696 *
4697 * @remarks This function assumes our cache of the VMCS controls
4698 * is valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
4699 */
4700static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
4701{
4702#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
4703#define HMVMX_CHECK_BREAK(expr, err) do { \
4704 if (!(expr)) { uError = (err); break; } \
4705 } while (0)
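/* Illustrative usage sketch: inside the do { ... } while (0) loop below, these macros record the
   first failing diagnostic and break out of the loop, e.g.
       HMVMX_CHECK_BREAK(fSomeCondition, VMX_IGS_SOME_DIAGNOSTIC);
   where fSomeCondition and VMX_IGS_SOME_DIAGNOSTIC are placeholders, not real identifiers. */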
4706
4707 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4708 uint32_t uError = VMX_IGS_ERROR;
4709 uint32_t u32IntrState = 0;
4710#ifndef IN_NEM_DARWIN
4711 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4712 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
4713#else
4714 bool const fUnrestrictedGuest = true;
4715#endif
4716 do
4717 {
4718 int rc;
4719
4720 /*
4721 * Guest-interruptibility state.
4722 *
4723 * Read this first so that any check that fails before the ones that actually
4724 * require the guest-interruptibility state still reflects the correct VMCS
4725 * value, avoiding further confusion.
4726 */
4727 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
4728 AssertRC(rc);
4729
4730 uint32_t u32Val;
4731 uint64_t u64Val;
4732
4733 /*
4734 * CR0.
4735 */
4736 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4737 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
4738 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
4739 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
4740 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
4741 if (fUnrestrictedGuest)
4742 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
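        /* Worked example (typical values only; the real ones come from the VMX capability MSRs):
           with IA32_VMX_CR0_FIXED0 = 0x80000021 (PG, NE, PE) and IA32_VMX_CR0_FIXED1 = 0xffffffff,
           fSetCr0 = 0x80000021 (bits that must be 1) and fZapCr0 = 0xffffffff (bits that may be 1);
           with unrestricted guest execution PE and PG are dropped from fSetCr0, leaving only NE. */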
4743
4744 uint64_t u64GuestCr0;
4745 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
4746 AssertRC(rc);
4747 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
4748 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
4749 if ( !fUnrestrictedGuest
4750 && (u64GuestCr0 & X86_CR0_PG)
4751 && !(u64GuestCr0 & X86_CR0_PE))
4752 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
4753
4754 /*
4755 * CR4.
4756 */
4757 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4758 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
4759 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
4760
4761 uint64_t u64GuestCr4;
4762 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
4763 AssertRC(rc);
4764 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
4765 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
4766
4767 /*
4768 * IA32_DEBUGCTL MSR.
4769 */
4770 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
4771 AssertRC(rc);
4772 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4773 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
4774 {
4775 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
4776 }
4777 uint64_t u64DebugCtlMsr = u64Val;
4778
4779#ifdef VBOX_STRICT
4780 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
4781 AssertRC(rc);
4782 Assert(u32Val == pVmcsInfo->u32EntryCtls);
4783#endif
4784 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4785
4786 /*
4787 * RIP and RFLAGS.
4788 */
4789 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
4790 AssertRC(rc);
4791 /* pCtx->rip can differ from the value in the VMCS (e.g. after running guest code and taking VM-exits that don't update it). */
4792 if ( !fLongModeGuest
4793 || !pCtx->cs.Attr.n.u1Long)
4794 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
4795 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
4796 * must be identical if the "IA-32e mode guest" VM-entry
4797 * control is 1 and CS.L is 1. No check applies if the
4798 * CPU supports 64 linear-address bits. */
4799
4800 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
4801 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
4802 AssertRC(rc);
4803 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
4804 VMX_IGS_RFLAGS_RESERVED);
4805 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
4806 uint32_t const u32Eflags = u64Val;
4807
4808 if ( fLongModeGuest
4809 || ( fUnrestrictedGuest
4810 && !(u64GuestCr0 & X86_CR0_PE)))
4811 {
4812 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
4813 }
4814
4815 uint32_t u32EntryInfo;
4816 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
4817 AssertRC(rc);
4818 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
4819 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
4820
4821 /*
4822 * 64-bit checks.
4823 */
4824 if (fLongModeGuest)
4825 {
4826 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
4827 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
4828 }
4829
4830 if ( !fLongModeGuest
4831 && (u64GuestCr4 & X86_CR4_PCIDE))
4832 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
4833
4834 /** @todo CR3 field must be such that bits 63:52 and bits in the range
4835 * 51:32 beyond the processor's physical-address width are 0. */
4836
4837 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4838 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
4839 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
4840
4841#ifndef IN_NEM_DARWIN
4842 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
4843 AssertRC(rc);
4844 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
4845
4846 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
4847 AssertRC(rc);
4848 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
4849#endif
4850
4851 /*
4852 * PERF_GLOBAL MSR.
4853 */
4854 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
4855 {
4856 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
4857 AssertRC(rc);
4858 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
4859 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
4860 }
4861
4862 /*
4863 * PAT MSR.
4864 */
4865 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
4866 {
4867 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
4868 AssertRC(rc);
4869 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xf8f8f8f8f8f8f8f8)), VMX_IGS_PAT_MSR_RESERVED); /* Bits 7:3 of each PAT entry MBZ. */
4870 for (unsigned i = 0; i < 8; i++)
4871 {
4872 uint8_t u8Val = (u64Val & 0xff);
4873 if ( u8Val != 0 /* UC */
4874 && u8Val != 1 /* WC */
4875 && u8Val != 4 /* WT */
4876 && u8Val != 5 /* WP */
4877 && u8Val != 6 /* WB */
4878 && u8Val != 7 /* UC- */)
4879 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
4880 u64Val >>= 8;
4881 }
4882 }
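        /* Worked example: the architectural power-on PAT value 0x0007040600070406 (WB, WT, UC-, UC
           repeated) has no reserved bits set and every byte is a permitted type, so it passes both
           checks above. */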
4883
4884 /*
4885 * EFER MSR.
4886 */
4887 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
4888 {
4889 Assert(g_fHmVmxSupportsVmcsEfer);
4890 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
4891 AssertRC(rc);
4892 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
4893 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
4894 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
4895 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
4896 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
4897 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
4898 * iemVmxVmentryCheckGuestState(). */
4899 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4900 || !(u64GuestCr0 & X86_CR0_PG)
4901 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
4902 VMX_IGS_EFER_LMA_LME_MISMATCH);
4903 }
4904
4905 /*
4906 * Segment registers.
4907 */
4908 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
4909 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
4910 if (!(u32Eflags & X86_EFL_VM))
4911 {
4912 /* CS */
4913 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
4914 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
4915 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
4916 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
4917 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4918 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
4919 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4920 /* CS cannot be loaded with NULL in protected mode. */
4921 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
4922 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
4923 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
4924 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
4925 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
4926 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
4927 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
4928 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
4929 else
4930 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
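            /* Note: types 9/11 are accessed non-conforming code segments and types 13/15 accessed
               conforming ones, which is why the DPL rules checked above differ between the two
               pairs; type 3 (read/write data) is only acceptable for CS with unrestricted guest
               execution. */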
4931
4932 /* SS */
4933 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4934 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
4935 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
4936 if ( !(pCtx->cr0 & X86_CR0_PE)
4937 || pCtx->cs.Attr.n.u4Type == 3)
4938 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
4939
4940 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
4941 {
4942 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
4943 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
4944 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
4945 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
4946 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4947 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4948 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
4949 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4950 }
4951
4952 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
4953 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4954 {
4955 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
4956 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
4957 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4958 || pCtx->ds.Attr.n.u4Type > 11
4959 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4960 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
4961 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
4962 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4963 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4964 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
4965 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4966 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4967 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
4968 }
4969 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4970 {
4971 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
4972 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
4973 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4974 || pCtx->es.Attr.n.u4Type > 11
4975 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_ES_ATTR_DPL_RPL_UNEQUAL);
4976 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
4977 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
4978 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
4979 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
4980 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
4981 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
4982 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4983 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
4984 }
4985 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
4986 {
4987 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
4988 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
4989 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4990 || pCtx->fs.Attr.n.u4Type > 11
4991 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
4992 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
4993 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
4994 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
4995 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
4996 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
4997 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
4998 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4999 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5000 }
5001 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5002 {
5003 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5004 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5005 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5006 || pCtx->gs.Attr.n.u4Type > 11
5007 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5008 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5009 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5010 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5011 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5012 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5013 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5014 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5015 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5016 }
5017 /* 64-bit capable CPUs. */
5018 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5019 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5020 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5021 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5022 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5023 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5024 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5025 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5026 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5027 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5028 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5029 }
5030 else
5031 {
5032 /* V86 mode checks. */
5033 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5034 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5035 {
5036 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5037 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5038 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5039 }
5040 else
5041 {
5042 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5043 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5044 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5045 }
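            /* Note: 0xf3 decodes to present, DPL 3, S=1, type 3 (accessed read/write data), i.e.
               the attributes the real-on-v86 code forces into the VMCS for all segment registers. */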
5046
5047 /* CS */
5048 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5049 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5050 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5051 /* SS */
5052 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5053 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5054 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5055 /* DS */
5056 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5057 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5058 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5059 /* ES */
5060 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5061 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5062 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5063 /* FS */
5064 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5065 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5066 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5067 /* GS */
5068 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5069 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5070 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5071 /* 64-bit capable CPUs. */
5072 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5073 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5074 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5075 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5076 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5077 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5078 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5079 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5080 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5081 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5082 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5083 }
5084
5085 /*
5086 * TR.
5087 */
5088 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5089 /* 64-bit capable CPUs. */
5090 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5091 if (fLongModeGuest)
5092 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5093 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5094 else
5095 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5096 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5097 VMX_IGS_TR_ATTR_TYPE_INVALID);
5098 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5099 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5100 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5101 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5102 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5103 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5104 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5105 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5106
5107 /*
5108 * GDTR and IDTR (64-bit capable checks).
5109 */
5110 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5111 AssertRC(rc);
5112 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5113
5114 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5115 AssertRC(rc);
5116 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5117
5118 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5119 AssertRC(rc);
5120 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5121
5122 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5123 AssertRC(rc);
5124 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5125
5126 /*
5127 * Guest Non-Register State.
5128 */
5129 /* Activity State. */
5130 uint32_t u32ActivityState;
5131 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5132 AssertRC(rc);
5133 HMVMX_CHECK_BREAK( !u32ActivityState
5134 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5135 VMX_IGS_ACTIVITY_STATE_INVALID);
5136 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5137 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5138
5139 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5140 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5141 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5142
5143 /** @todo Activity state and injecting interrupts. Left as a todo since we
5144 * currently don't use any activity state other than ACTIVE. */
5145
5146 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5147 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5148
5149 /* Guest interruptibility-state. */
5150 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5151 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5152 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5153 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5154 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5155 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5156 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5157 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5158 {
5159 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5160 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5161 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5162 }
5163 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5164 {
5165 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5166 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5167 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5168 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5169 }
5170 /** @todo Assumes the processor is not in SMM. */
5171 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5172 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5173 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5174 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5175 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5176 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5177 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5178 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5179
5180 /* Pending debug exceptions. */
5181 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5182 AssertRC(rc);
5183 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5184 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5185 u32Val = u64Val; /* For pending debug exceptions checks below. */
5186
5187 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5188 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5189 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5190 {
5191 if ( (u32Eflags & X86_EFL_TF)
5192 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5193 {
5194 /* Bit 14 is PendingDebug.BS. */
5195 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5196 }
5197 if ( !(u32Eflags & X86_EFL_TF)
5198 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5199 {
5200 /* Bit 14 is PendingDebug.BS. */
5201 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5202 }
5203 }
5204
5205#ifndef IN_NEM_DARWIN
5206 /* VMCS link pointer. */
5207 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5208 AssertRC(rc);
5209 if (u64Val != UINT64_C(0xffffffffffffffff))
5210 {
5211 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5212 /** @todo Bits beyond the processor's physical-address width MBZ. */
5213 /** @todo SMM checks. */
5214 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5215 Assert(pVmcsInfo->pvShadowVmcs);
5216 VMXVMCSREVID VmcsRevId;
5217 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5218 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5219 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5220 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5221 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5222 }
5223
5224 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5225 * not using nested paging? */
5226 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5227 && !fLongModeGuest
5228 && CPUMIsGuestInPAEModeEx(pCtx))
5229 {
5230 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5231 AssertRC(rc);
5232 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5233
5234 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5235 AssertRC(rc);
5236 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5237
5238 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5239 AssertRC(rc);
5240 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5241
5242 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5243 AssertRC(rc);
5244 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5245 }
5246#endif
5247
5248 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5249 if (uError == VMX_IGS_ERROR)
5250 uError = VMX_IGS_REASON_NOT_FOUND;
5251 } while (0);
5252
5253 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5254 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5255 return uError;
5256
5257#undef HMVMX_ERROR_BREAK
5258#undef HMVMX_CHECK_BREAK
5259}
5260/** @} */
5261
5262
5263#ifndef HMVMX_USE_FUNCTION_TABLE
5264/**
5265 * Handles a guest VM-exit from hardware-assisted VMX execution.
5266 *
5267 * @returns Strict VBox status code (i.e. informational status codes too).
5268 * @param pVCpu The cross context virtual CPU structure.
5269 * @param pVmxTransient The VMX-transient structure.
5270 */
5271DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5272{
5273#ifdef DEBUG_ramshankar
5274# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5275 do { \
5276 if (a_fSave != 0) \
5277 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); \
5278 VBOXSTRICTRC rcStrict = a_CallExpr; \
5279 if (a_fSave != 0) \
5280 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5281 return rcStrict; \
5282 } while (0)
5283#else
5284# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5285#endif
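/* Note on the a_fSave parameter above: in the DEBUG_ramshankar variant a non-zero value imports the
   entire guest state before calling the handler and marks it all as changed afterwards; in the
   normal build the parameter is ignored and the handler is simply tail-called. */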
5286 uint32_t const uExitReason = pVmxTransient->uExitReason;
5287 switch (uExitReason)
5288 {
5289 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5290 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5291 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5292 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5293 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5294 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5295 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5296 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5297 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5298 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5299 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5300 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5301 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5302 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5303 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5304 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5305 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5306 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5307 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5308 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5309 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5310 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5311 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5312 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5313 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5314 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5315 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5316 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5317 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5318 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5319#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5320 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5321 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5322 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5323 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5324 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5325 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5326 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5327 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5328 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5329 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5330#else
5331 case VMX_EXIT_VMCLEAR:
5332 case VMX_EXIT_VMLAUNCH:
5333 case VMX_EXIT_VMPTRLD:
5334 case VMX_EXIT_VMPTRST:
5335 case VMX_EXIT_VMREAD:
5336 case VMX_EXIT_VMRESUME:
5337 case VMX_EXIT_VMWRITE:
5338 case VMX_EXIT_VMXOFF:
5339 case VMX_EXIT_VMXON:
5340 case VMX_EXIT_INVVPID:
5341 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5342#endif
5343#if defined(VBOX_WITH_NESTED_HWVIRT_VMX) && defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT)
5344 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5345#else
5346 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5347#endif
5348
5349 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5350 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5351 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5352
5353 case VMX_EXIT_INIT_SIGNAL:
5354 case VMX_EXIT_SIPI:
5355 case VMX_EXIT_IO_SMI:
5356 case VMX_EXIT_SMI:
5357 case VMX_EXIT_ERR_MSR_LOAD:
5358 case VMX_EXIT_ERR_MACHINE_CHECK:
5359 case VMX_EXIT_PML_FULL:
5360 case VMX_EXIT_VIRTUALIZED_EOI:
5361 case VMX_EXIT_GDTR_IDTR_ACCESS:
5362 case VMX_EXIT_LDTR_TR_ACCESS:
5363 case VMX_EXIT_APIC_WRITE:
5364 case VMX_EXIT_RDRAND:
5365 case VMX_EXIT_RSM:
5366 case VMX_EXIT_VMFUNC:
5367 case VMX_EXIT_ENCLS:
5368 case VMX_EXIT_RDSEED:
5369 case VMX_EXIT_XSAVES:
5370 case VMX_EXIT_XRSTORS:
5371 case VMX_EXIT_UMWAIT:
5372 case VMX_EXIT_TPAUSE:
5373 case VMX_EXIT_LOADIWKEY:
5374 default:
5375 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5376 }
5377#undef VMEXIT_CALL_RET
5378}
5379#endif /* !HMVMX_USE_FUNCTION_TABLE */
5380
5381
5382#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5383/**
5384 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5385 *
5386 * @returns Strict VBox status code (i.e. informational status codes too).
5387 * @param pVCpu The cross context virtual CPU structure.
5388 * @param pVmxTransient The VMX-transient structure.
5389 */
5390DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5391{
5392 uint32_t const uExitReason = pVmxTransient->uExitReason;
5393 switch (uExitReason)
5394 {
5395# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5396 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient);
5397 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolationNested(pVCpu, pVmxTransient);
5398# else
5399 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
5400 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
5401# endif
5402 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5403 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5404 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5405
5406 /*
5407 * We shouldn't direct host physical interrupts to the nested-guest.
5408 */
5409 case VMX_EXIT_EXT_INT:
5410 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5411
5412 /*
5413 * Instructions that cause VM-exits unconditionally or where the condition is
5414 * always taken solely from the nested hypervisor (meaning if the VM-exit
5415 * happens, it's guaranteed to be a nested-guest VM-exit).
5416 *
5417 * - Provides VM-exit instruction length ONLY.
5418 */
5419 case VMX_EXIT_CPUID: /* Unconditional. */
5420 case VMX_EXIT_VMCALL:
5421 case VMX_EXIT_GETSEC:
5422 case VMX_EXIT_INVD:
5423 case VMX_EXIT_XSETBV:
5424 case VMX_EXIT_VMLAUNCH:
5425 case VMX_EXIT_VMRESUME:
5426 case VMX_EXIT_VMXOFF:
5427 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5428 case VMX_EXIT_VMFUNC:
5429 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5430
5431 /*
5432 * Instructions that cause VM-exits unconditionally or where the condition is
5433 * always taken solely from the nested hypervisor (meaning if the VM-exit
5434 * happens, it's guaranteed to be a nested-guest VM-exit).
5435 *
5436 * - Provides VM-exit instruction length.
5437 * - Provides VM-exit information.
5438 * - Optionally provides Exit qualification.
5439 *
5440 * Since Exit qualification is 0 for all VM-exits where it is not
5441 * applicable, reading and passing it to the guest should produce
5442 * defined behavior.
5443 *
5444 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5445 */
5446 case VMX_EXIT_INVEPT: /* Unconditional. */
5447 case VMX_EXIT_INVVPID:
5448 case VMX_EXIT_VMCLEAR:
5449 case VMX_EXIT_VMPTRLD:
5450 case VMX_EXIT_VMPTRST:
5451 case VMX_EXIT_VMXON:
5452 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5453 case VMX_EXIT_LDTR_TR_ACCESS:
5454 case VMX_EXIT_RDRAND:
5455 case VMX_EXIT_RDSEED:
5456 case VMX_EXIT_XSAVES:
5457 case VMX_EXIT_XRSTORS:
5458 case VMX_EXIT_UMWAIT:
5459 case VMX_EXIT_TPAUSE:
5460 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5461
5462 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5463 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5464 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5465 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5466 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
5467 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
5468 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
5469 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
5470 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
5471 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
5472 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
5473 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
5474 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
5475 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
5476 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
5477 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
5478 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
5479 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
5480 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
5481
5482 case VMX_EXIT_PREEMPT_TIMER:
5483 {
5484 /** @todo NSTVMX: Preempt timer. */
5485 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
5486 }
5487
5488 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
5489 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
5490
5491 case VMX_EXIT_VMREAD:
5492 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
5493
5494 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
5495 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
5496
5497 case VMX_EXIT_INIT_SIGNAL:
5498 case VMX_EXIT_SIPI:
5499 case VMX_EXIT_IO_SMI:
5500 case VMX_EXIT_SMI:
5501 case VMX_EXIT_ERR_MSR_LOAD:
5502 case VMX_EXIT_ERR_MACHINE_CHECK:
5503 case VMX_EXIT_PML_FULL:
5504 case VMX_EXIT_RSM:
5505 default:
5506 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5507 }
5508}
5509#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5510
5511
5512/** @name VM-exit helpers.
5513 * @{
5514 */
5515/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5516/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
5517/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5518
5519/** Macro for VM-exits called unexpectedly. */
5520#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
5521 do { \
5522 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
5523 return VERR_VMX_UNEXPECTED_EXIT; \
5524 } while (0)
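/* Illustrative usage sketch: an exit handler that encounters an exit reason it can never
   legitimately see records it and fails hard, e.g.
       HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
   This is only an example invocation, not a statement about where the macro is actually used. */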
5525
5526#ifdef VBOX_STRICT
5527# ifndef IN_NEM_DARWIN
5528/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
5529# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
5530 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
5531
5532# define HMVMX_ASSERT_PREEMPT_CPUID() \
5533 do { \
5534 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
5535 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
5536 } while (0)
5537
5538# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5539 do { \
5540 AssertPtr((a_pVCpu)); \
5541 AssertPtr((a_pVmxTransient)); \
5542 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
5543 Assert((a_pVmxTransient)->pVmcsInfo); \
5544 Assert(ASMIntAreEnabled()); \
5545 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5546 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
5547 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5548 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5549 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
5550 HMVMX_ASSERT_PREEMPT_CPUID(); \
5551 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5552 } while (0)
5553# else
5554# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
5555# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
5556# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5557 do { \
5558 AssertPtr((a_pVCpu)); \
5559 AssertPtr((a_pVmxTransient)); \
5560 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
5561 Assert((a_pVmxTransient)->pVmcsInfo); \
5562 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5563 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5564 } while (0)
5565# endif
5566
5567# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5568 do { \
5569 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
5570 Assert((a_pVmxTransient)->fIsNestedGuest); \
5571 } while (0)
5572
5573# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5574 do { \
5575 Log4Func(("\n")); \
5576 } while (0)
5577#else
5578# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5579 do { \
5580 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5581 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
5582 } while (0)
5583
5584# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5585 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
5586
5587# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
5588#endif
5589
5590#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5591/** Macro that does the necessary privilege checks and intercepted VM-exits for
5592 * guests that attempted to execute a VMX instruction. */
5593# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
5594 do \
5595 { \
5596 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
5597 if (rcStrictTmp == VINF_SUCCESS) \
5598 { /* likely */ } \
5599 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5600 { \
5601 Assert((a_pVCpu)->hm.s.Event.fPending); \
5602 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
5603 return VINF_SUCCESS; \
5604 } \
5605 else \
5606 { \
5607 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
5608 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
5609 } \
5610 } while (0)
5611
5612/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
5613# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
5614 do \
5615 { \
5616 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
5617 (a_pGCPtrEffAddr)); \
5618 if (rcStrictTmp == VINF_SUCCESS) \
5619 { /* likely */ } \
5620 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5621 { \
5622 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
5623 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
5624 NOREF(uXcptTmp); \
5625 return VINF_SUCCESS; \
5626 } \
5627 else \
5628 { \
5629 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
5630 return rcStrictTmp; \
5631 } \
5632 } while (0)
5633#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5634
5635
5636/**
5637 * Advances the guest RIP by the specified number of bytes.
5638 *
5639 * @param pVCpu The cross context virtual CPU structure.
5640 * @param cbInstr Number of bytes to advance the RIP by.
5641 *
5642 * @remarks No-long-jump zone!!!
5643 */
5644DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
5645{
5646 /* Advance the RIP. */
5647 pVCpu->cpum.GstCtx.rip += cbInstr;
5648 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
5649
5650 /* Update interrupt inhibition. */
5651 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
5652 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
5653 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
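    /* Note: the STI/MOV SS inhibition only applies to the single instruction following it; once
       RIP no longer matches the recorded inhibition PC the force-flag is stale and is cleared. */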
5654}
5655
5656
5657/**
5658 * Advances the guest RIP after reading it from the VMCS.
5659 *
5660 * @returns VBox status code, no informational status codes.
5661 * @param pVCpu The cross context virtual CPU structure.
5662 * @param pVmxTransient The VMX-transient structure.
5663 *
5664 * @remarks No-long-jump zone!!!
5665 */
5666static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5667{
5668 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
5669 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
5670 AssertRCReturn(rc, rc);
5671
5672 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
5673 return VINF_SUCCESS;
5674}
5675
5676
5677/**
5678 * Handles a condition that occurred while delivering an event through the guest or
5679 * nested-guest IDT.
5680 *
5681 * @returns Strict VBox status code (i.e. informational status codes too).
5682 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5683 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
5684 * to continue execution of the guest, which will deliver the \#DF.
5685 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5686 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
5687 *
5688 * @param pVCpu The cross context virtual CPU structure.
5689 * @param pVmxTransient The VMX-transient structure.
5690 *
5691 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
5692 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
5693 * is due to an EPT violation, PML full or SPP-related event.
5694 *
5695 * @remarks No-long-jump zone!!!
5696 */
5697static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5698{
5699 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
5700 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
5701 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5702 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5703 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5704 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
5705
5706 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5707 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5708 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
5709 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
5710 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
5711 {
5712 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
5713 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
5714
5715 /*
5716 * If the event was a software interrupt (generated with INT n) or a software exception
5717 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
5718 * can handle the VM-exit and continue guest execution which will re-execute the
5719 * instruction rather than re-injecting the exception, as that can cause premature
5720 * trips to ring-3 before injection and involve TRPM which currently has no way of
5721 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
5722 * the problem).
5723 */
5724 IEMXCPTRAISE enmRaise;
5725 IEMXCPTRAISEINFO fRaiseInfo;
5726 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5727 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5728 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
5729 {
5730 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
5731 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5732 }
5733 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
5734 {
5735 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
5736 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
5737 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
5738
5739 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
5740 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
5741
5742 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
5743
5744 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
5745 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
5746 {
5747 pVmxTransient->fVectoringPF = true;
5748 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5749 }
5750 }
5751 else
5752 {
5753 /*
5754 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
5755 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
5756 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
5757 */
5758 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5759 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5760 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
5761 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5762 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5763 }
5764
5765 /*
5766 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
5767 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
5768 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
5769 * subsequent VM-entry would fail, see @bugref{7445}.
5770 *
5771 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
5772 */
5773 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5774 && enmRaise == IEMXCPTRAISE_PREV_EVENT
5775 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5776 && CPUMIsGuestNmiBlocking(pVCpu))
5777 {
5778 CPUMSetGuestNmiBlocking(pVCpu, false);
5779 }
5780
5781 switch (enmRaise)
5782 {
5783 case IEMXCPTRAISE_CURRENT_XCPT:
5784 {
5785 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
5786 Assert(rcStrict == VINF_SUCCESS);
5787 break;
5788 }
5789
5790 case IEMXCPTRAISE_PREV_EVENT:
5791 {
5792 uint32_t u32ErrCode;
5793 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
5794 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5795 else
5796 u32ErrCode = 0;
5797
5798 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
5799 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
5800 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */,
5801 u32ErrCode, pVCpu->cpum.GstCtx.cr2);
5802
5803 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5804 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
5805 Assert(rcStrict == VINF_SUCCESS);
5806 break;
5807 }
5808
5809 case IEMXCPTRAISE_REEXEC_INSTR:
5810 Assert(rcStrict == VINF_SUCCESS);
5811 break;
5812
5813 case IEMXCPTRAISE_DOUBLE_FAULT:
5814 {
5815 /*
 5816 * Determining a vectoring double #PF condition. Used later, when PGM evaluates the
5817 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
5818 */
5819 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
5820 {
5821 pVmxTransient->fVectoringDoublePF = true;
5822 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5823 pVCpu->cpum.GstCtx.cr2));
5824 rcStrict = VINF_SUCCESS;
5825 }
5826 else
5827 {
5828 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
5829 vmxHCSetPendingXcptDF(pVCpu);
5830 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5831 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5832 rcStrict = VINF_HM_DOUBLE_FAULT;
5833 }
5834 break;
5835 }
5836
5837 case IEMXCPTRAISE_TRIPLE_FAULT:
5838 {
5839 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
5840 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5841 rcStrict = VINF_EM_RESET;
5842 break;
5843 }
5844
5845 case IEMXCPTRAISE_CPU_HANG:
5846 {
5847 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
5848 rcStrict = VERR_EM_GUEST_CPU_HANG;
5849 break;
5850 }
5851
5852 default:
5853 {
5854 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
5855 rcStrict = VERR_VMX_IPE_2;
5856 break;
5857 }
5858 }
5859 }
5860 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5861 && !CPUMIsGuestNmiBlocking(pVCpu))
5862 {
5863 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
5864 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
5865 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
5866 {
5867 /*
 5868 * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
5869 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5870 * that virtual NMIs remain blocked until the IRET execution is completed.
5871 *
5872 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
5873 */
5874 CPUMSetGuestNmiBlocking(pVCpu, true);
5875 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5876 }
5877 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5878 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5879 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5880 {
5881 /*
5882 * Execution of IRET caused an EPT violation, page-modification log-full event or
5883 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
5884 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5885 * that virtual NMIs remain blocked until the IRET execution is completed.
5886 *
5887 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
5888 */
5889 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
5890 {
5891 CPUMSetGuestNmiBlocking(pVCpu, true);
5892 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5893 }
5894 }
5895 }
5896
5897 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
5898 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
5899 return rcStrict;
5900}
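
/*
 * Illustrative scenario for the IDT-vectoring handling above (an assumed example, not
 * taken from a specific bug report): the guest takes a hardware #PF and, while the CPU
 * is delivering it through the IDT, the IDT or stack access causes an EPT violation
 * VM-exit.  The VM-exit interruption-information is not valid in that case, so the code
 * above takes the IEMXCPTRAISE_PREV_EVENT path and re-queues the original #PF with
 * vmxHCSetPendingEvent(); it is injected again once the EPT violation has been serviced.
 */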
5901
5902
5903#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5904/**
5905 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
5906 * guest attempting to execute a VMX instruction.
5907 *
5908 * @returns Strict VBox status code (i.e. informational status codes too).
5909 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5910 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
5911 *
5912 * @param pVCpu The cross context virtual CPU structure.
5913 * @param uExitReason The VM-exit reason.
5914 *
5915 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
5916 * @remarks No-long-jump zone!!!
5917 */
5918static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
5919{
5920 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
5921 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
5922
5923 /*
5924 * The physical CPU would have already checked the CPU mode/code segment.
5925 * We shall just assert here for paranoia.
5926 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
5927 */
5928 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
5929 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
5930 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
5931
5932 if (uExitReason == VMX_EXIT_VMXON)
5933 {
5934 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5935
5936 /*
5937 * We check CR4.VMXE because it is required to be always set while in VMX operation
5938 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
5939 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
5940 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
5941 */
5942 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
5943 {
5944 Log4Func(("CR4.VMXE is not set -> #UD\n"));
5945 vmxHCSetPendingXcptUD(pVCpu);
5946 return VINF_HM_PENDING_XCPT;
5947 }
5948 }
5949 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
5950 {
5951 /*
5952 * The guest has not entered VMX operation but attempted to execute a VMX instruction
5953 * (other than VMXON), we need to raise a #UD.
5954 */
5955 Log4Func(("Not in VMX root mode -> #UD\n"));
5956 vmxHCSetPendingXcptUD(pVCpu);
5957 return VINF_HM_PENDING_XCPT;
5958 }
5959
5960 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
5961 return VINF_SUCCESS;
5962}
5963
5964
5965/**
5966 * Decodes the memory operand of an instruction that caused a VM-exit.
5967 *
5968 * The Exit qualification field provides the displacement field for memory
5969 * operand instructions, if any.
5970 *
5971 * @returns Strict VBox status code (i.e. informational status codes too).
5972 * @retval VINF_SUCCESS if the operand was successfully decoded.
5973 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
5974 * operand.
5975 * @param pVCpu The cross context virtual CPU structure.
5976 * @param uExitInstrInfo The VM-exit instruction information field.
5977 * @param enmMemAccess The memory operand's access type (read or write).
 5978 * @param GCPtrDisp The instruction displacement field, if any. For
 5979 * RIP-relative addressing pass RIP + displacement here.
 5980 * @param enmMemAccess The memory operand's access type (read or write).
5981 *
5982 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
5983 * virtual-8086 mode hence skips those checks while verifying if the
5984 * segment is valid.
5985 */
5986static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
5987 PRTGCPTR pGCPtrMem)
5988{
5989 Assert(pGCPtrMem);
5990 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
5991 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
5992 | CPUMCTX_EXTRN_CR0);
5993
5994 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
5995 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
5996 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
5997
5998 VMXEXITINSTRINFO ExitInstrInfo;
5999 ExitInstrInfo.u = uExitInstrInfo;
6000 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6001 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6002 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6003 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6004 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6005 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6006 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6007 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6008 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6009
6010 /*
6011 * Validate instruction information.
 6012 * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6013 */
6014 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6015 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6016 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6017 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6018 AssertLogRelMsgReturn(fIsMemOperand,
6019 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6020
6021 /*
6022 * Compute the complete effective address.
6023 *
6024 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6025 * See AMD spec. 4.5.2 "Segment Registers".
6026 */
6027 RTGCPTR GCPtrMem = GCPtrDisp;
6028 if (fBaseRegValid)
6029 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6030 if (fIdxRegValid)
6031 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6032
6033 RTGCPTR const GCPtrOff = GCPtrMem;
6034 if ( !fIsLongMode
6035 || iSegReg >= X86_SREG_FS)
6036 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6037 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6038
6039 /*
6040 * Validate effective address.
6041 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6042 */
6043 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6044 Assert(cbAccess > 0);
6045 if (fIsLongMode)
6046 {
6047 if (X86_IS_CANONICAL(GCPtrMem))
6048 {
6049 *pGCPtrMem = GCPtrMem;
6050 return VINF_SUCCESS;
6051 }
6052
6053 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6054 * "Data Limit Checks in 64-bit Mode". */
6055 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6056 vmxHCSetPendingXcptGP(pVCpu, 0);
6057 return VINF_HM_PENDING_XCPT;
6058 }
6059
6060 /*
6061 * This is a watered down version of iemMemApplySegment().
6062 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6063 * and segment CPL/DPL checks are skipped.
6064 */
6065 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6066 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6067 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6068
6069 /* Check if the segment is present and usable. */
6070 if ( pSel->Attr.n.u1Present
6071 && !pSel->Attr.n.u1Unusable)
6072 {
6073 Assert(pSel->Attr.n.u1DescType);
6074 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6075 {
6076 /* Check permissions for the data segment. */
6077 if ( enmMemAccess == VMXMEMACCESS_WRITE
6078 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6079 {
6080 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6081 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6082 return VINF_HM_PENDING_XCPT;
6083 }
6084
6085 /* Check limits if it's a normal data segment. */
6086 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6087 {
6088 if ( GCPtrFirst32 > pSel->u32Limit
6089 || GCPtrLast32 > pSel->u32Limit)
6090 {
6091 Log4Func(("Data segment limit exceeded. "
6092 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6093 GCPtrLast32, pSel->u32Limit));
6094 if (iSegReg == X86_SREG_SS)
6095 vmxHCSetPendingXcptSS(pVCpu, 0);
6096 else
6097 vmxHCSetPendingXcptGP(pVCpu, 0);
6098 return VINF_HM_PENDING_XCPT;
6099 }
6100 }
6101 else
6102 {
6103 /* Check limits if it's an expand-down data segment.
6104 Note! The upper boundary is defined by the B bit, not the G bit! */
6105 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6106 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6107 {
6108 Log4Func(("Expand-down data segment limit exceeded. "
6109 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6110 GCPtrLast32, pSel->u32Limit));
6111 if (iSegReg == X86_SREG_SS)
6112 vmxHCSetPendingXcptSS(pVCpu, 0);
6113 else
6114 vmxHCSetPendingXcptGP(pVCpu, 0);
6115 return VINF_HM_PENDING_XCPT;
6116 }
6117 }
6118 }
6119 else
6120 {
6121 /* Check permissions for the code segment. */
6122 if ( enmMemAccess == VMXMEMACCESS_WRITE
6123 || ( enmMemAccess == VMXMEMACCESS_READ
6124 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6125 {
6126 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6127 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6128 vmxHCSetPendingXcptGP(pVCpu, 0);
6129 return VINF_HM_PENDING_XCPT;
6130 }
6131
6132 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6133 if ( GCPtrFirst32 > pSel->u32Limit
6134 || GCPtrLast32 > pSel->u32Limit)
6135 {
6136 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6137 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6138 if (iSegReg == X86_SREG_SS)
6139 vmxHCSetPendingXcptSS(pVCpu, 0);
6140 else
6141 vmxHCSetPendingXcptGP(pVCpu, 0);
6142 return VINF_HM_PENDING_XCPT;
6143 }
6144 }
6145 }
6146 else
6147 {
6148 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6149 vmxHCSetPendingXcptGP(pVCpu, 0);
6150 return VINF_HM_PENDING_XCPT;
6151 }
6152
6153 *pGCPtrMem = GCPtrMem;
6154 return VINF_SUCCESS;
6155}
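
/*
 * Worked example for the address computation above (illustrative instruction, not from a
 * real trace): for a 64-bit guest executing "vmptrld [rbx + rsi*4 + 0x10]" the Exit
 * qualification supplies the displacement (0x10) while the instruction-information field
 * supplies base=RBX, index=RSI, scaling=2 (i.e. *4) and seg=DS, so
 *     GCPtrMem = 0x10 + rbx + (rsi << 2)
 * The DS base is not added in long mode (only FS/GS bases apply there); the result is
 * masked to the address size and must be canonical, otherwise #GP(0) is made pending.
 */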
6156#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6157
6158
6159/**
6160 * VM-exit helper for LMSW.
6161 */
6162static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6163{
6164 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6165 AssertRCReturn(rc, rc);
6166
6167 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6168 AssertMsg( rcStrict == VINF_SUCCESS
6169 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6170
6171 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6172 if (rcStrict == VINF_IEM_RAISED_XCPT)
6173 {
6174 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6175 rcStrict = VINF_SUCCESS;
6176 }
6177
6178 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6179 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6180 return rcStrict;
6181}
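
/*
 * Note: the pattern above is shared by the IEM-assisted exit helpers that follow (CLTS,
 * MOV CRx): import the state IEM needs, call the IEMExecDecoded* worker, mark the guest
 * registers the instruction may have modified as changed, and fold VINF_IEM_RAISED_XCPT
 * into VINF_SUCCESS after setting HM_CHANGED_RAISED_XCPT_MASK so that any state touched
 * by raising the exception is re-exported before the next VM-entry.
 */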
6182
6183
6184/**
6185 * VM-exit helper for CLTS.
6186 */
6187static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6188{
6189 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6190 AssertRCReturn(rc, rc);
6191
6192 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6193 AssertMsg( rcStrict == VINF_SUCCESS
6194 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6195
6196 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6197 if (rcStrict == VINF_IEM_RAISED_XCPT)
6198 {
6199 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6200 rcStrict = VINF_SUCCESS;
6201 }
6202
6203 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6204 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6205 return rcStrict;
6206}
6207
6208
6209/**
6210 * VM-exit helper for MOV from CRx (CRx read).
6211 */
6212static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6213{
6214 Assert(iCrReg < 16);
6215 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6216
6217 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6218 AssertRCReturn(rc, rc);
6219
6220 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6221 AssertMsg( rcStrict == VINF_SUCCESS
6222 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6223
6224 if (iGReg == X86_GREG_xSP)
6225 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6226 else
6227 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6228#ifdef VBOX_WITH_STATISTICS
6229 switch (iCrReg)
6230 {
6231 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6232 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6233 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6234 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6235 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6236 }
6237#endif
6238 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6239 return rcStrict;
6240}
6241
6242
6243/**
6244 * VM-exit helper for MOV to CRx (CRx write).
6245 */
6246static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6247{
6248 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6249
6250 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6251 AssertMsg( rcStrict == VINF_SUCCESS
6252 || rcStrict == VINF_IEM_RAISED_XCPT
6253 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6254
6255 switch (iCrReg)
6256 {
6257 case 0:
6258 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6259 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6260 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6261 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6262 break;
6263
6264 case 2:
6265 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6266 /* Nothing to do here, CR2 it's not part of the VMCS. */
 6267 /* Nothing to do here, CR2 is not part of the VMCS. */
6268
6269 case 3:
6270 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6271 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6272 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6273 break;
6274
6275 case 4:
6276 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6277 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6278#ifndef IN_NEM_DARWIN
6279 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6280 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6281#else
6282 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6283#endif
6284 break;
6285
6286 case 8:
6287 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6288 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6289 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6290 break;
6291
6292 default:
6293 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6294 break;
6295 }
6296
6297 if (rcStrict == VINF_IEM_RAISED_XCPT)
6298 {
6299 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6300 rcStrict = VINF_SUCCESS;
6301 }
6302 return rcStrict;
6303}
6304
6305
6306/**
6307 * VM-exit exception handler for \#PF (Page-fault exception).
6308 *
6309 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6310 */
6311static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6312{
6313 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6314 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6315
6316#ifndef IN_NEM_DARWIN
6317 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6318 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6319 { /* likely */ }
6320 else
6321#endif
6322 {
6323#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6324 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6325#endif
6326 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6327 if (!pVmxTransient->fVectoringDoublePF)
6328 {
6329 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6330 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6331 }
6332 else
6333 {
6334 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6335 Assert(!pVmxTransient->fIsNestedGuest);
6336 vmxHCSetPendingXcptDF(pVCpu);
6337 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6338 }
6339 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6340 return VINF_SUCCESS;
6341 }
6342
6343 Assert(!pVmxTransient->fIsNestedGuest);
6344
 6345 /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
6346 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
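    /* (fVectoringPF and fVectoringDoublePF are set by vmxHCCheckExitDueToEventDelivery()
       when this #PF VM-exit was raised while another event was being delivered.) */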
6347 if (pVmxTransient->fVectoringPF)
6348 {
6349 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6350 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6351 }
6352
6353 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6354 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6355 AssertRCReturn(rc, rc);
6356
6357 Log4Func(("#PF: cs:rip=%#04x:%#RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pCtx->cs.Sel, pCtx->rip,
6358 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pCtx->cr3));
6359
6360 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6361 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
6362
6363 Log4Func(("#PF: rc=%Rrc\n", rc));
6364 if (rc == VINF_SUCCESS)
6365 {
6366 /*
6367 * This is typically a shadow page table sync or a MMIO instruction. But we may have
6368 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6369 */
6370 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6371 TRPMResetTrap(pVCpu);
6372 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6373 return rc;
6374 }
6375
6376 if (rc == VINF_EM_RAW_GUEST_TRAP)
6377 {
6378 if (!pVmxTransient->fVectoringDoublePF)
6379 {
6380 /* It's a guest page fault and needs to be reflected to the guest. */
6381 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6382 TRPMResetTrap(pVCpu);
6383 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6384 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6385 uGstErrorCode, pVmxTransient->uExitQual);
6386 }
6387 else
6388 {
6389 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6390 TRPMResetTrap(pVCpu);
6391 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6392 vmxHCSetPendingXcptDF(pVCpu);
6393 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6394 }
6395
6396 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6397 return VINF_SUCCESS;
6398 }
6399
6400 TRPMResetTrap(pVCpu);
6401 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6402 return rc;
6403}
6404
6405
6406/**
6407 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6408 *
6409 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6410 */
6411static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6412{
6413 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6414 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6415
6416 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0);
6417 AssertRCReturn(rc, rc);
6418
6419 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6420 {
6421 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
6422 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6423
6424 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6425 * provides VM-exit instruction length. If this causes problem later,
6426 * disassemble the instruction like it's done on AMD-V. */
6427 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6428 AssertRCReturn(rc2, rc2);
6429 return rc;
6430 }
6431
6432 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6433 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6434 return VINF_SUCCESS;
6435}
6436
6437
6438/**
6439 * VM-exit exception handler for \#BP (Breakpoint exception).
6440 *
6441 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6442 */
6443static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6444{
6445 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6446 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6447
6448 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6449 AssertRCReturn(rc, rc);
6450
6451 VBOXSTRICTRC rcStrict;
6452 if (!pVmxTransient->fIsNestedGuest)
6453 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx));
6454 else
6455 rcStrict = VINF_EM_RAW_GUEST_TRAP;
6456
6457 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6458 {
6459 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6460 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6461 rcStrict = VINF_SUCCESS;
6462 }
6463
6464 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
6465 return rcStrict;
6466}
6467
6468
6469/**
6470 * VM-exit exception handler for \#AC (Alignment-check exception).
6471 *
6472 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6473 */
6474static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6475{
6476 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6477
6478 /*
6479 * Detect #ACs caused by host having enabled split-lock detection.
6480 * Emulate such instructions.
6481 */
6482 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo,
6483 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);
6484 AssertRCReturn(rc, rc);
6485 /** @todo detect split lock in cpu feature? */
6486 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
6487 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6488 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
6489 || CPUMGetGuestCPL(pVCpu) != 3
6490 /* 3. When the EFLAGS.AC != 0 this can only be a split-lock case. */
6491 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
6492 {
6493 /*
6494 * Check for debug/trace events and import state accordingly.
6495 */
6496 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
6497 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6498 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
6499#ifndef IN_NEM_DARWIN
6500 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
6501#endif
6502 )
6503 {
6504 if (pVM->cCpus == 1)
6505 {
6506#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6507 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6508#else
6509 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6510#endif
6511 AssertRCReturn(rc, rc);
6512 }
6513 }
6514 else
6515 {
6516 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6517 AssertRCReturn(rc, rc);
6518
6519 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
6520
6521 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
6522 {
6523 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
6524 if (rcStrict != VINF_SUCCESS)
6525 return rcStrict;
6526 }
6527 }
6528
6529 /*
6530 * Emulate the instruction.
6531 *
6532 * We have to ignore the LOCK prefix here as we must not retrigger the
6533 * detection on the host. This isn't all that satisfactory, though...
6534 */
6535 if (pVM->cCpus == 1)
6536 {
6537 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
6538 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6539
6540 /** @todo For SMP configs we should do a rendezvous here. */
6541 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
6542 if (rcStrict == VINF_SUCCESS)
6543#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6544 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6545 HM_CHANGED_GUEST_RIP
6546 | HM_CHANGED_GUEST_RFLAGS
6547 | HM_CHANGED_GUEST_GPRS_MASK
6548 | HM_CHANGED_GUEST_CS
6549 | HM_CHANGED_GUEST_SS);
6550#else
6551 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6552#endif
6553 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6554 {
6555 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6556 rcStrict = VINF_SUCCESS;
6557 }
6558 return rcStrict;
6559 }
6560 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
6561 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6562 return VINF_EM_EMULATE_SPLIT_LOCK;
6563 }
6564
6565 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
6566 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
6567 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
6568
6569 /* Re-inject it. We'll detect any nesting before getting here. */
6570 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6571 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6572 return VINF_SUCCESS;
6573}
6574
6575
6576/**
6577 * VM-exit exception handler for \#DB (Debug exception).
6578 *
6579 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6580 */
6581static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6582{
6583 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6584 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
6585
6586 /*
 6587 * Get the DR6-like values from the Exit qualification and pass them to DBGF for processing.
6588 */
6589 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6590
6591 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
6592 uint64_t const uDR6 = X86_DR6_INIT_VAL
6593 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
6594 | X86_DR6_BD | X86_DR6_BS));
6595
6596 int rc;
6597 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6598 if (!pVmxTransient->fIsNestedGuest)
6599 {
6600 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6601
6602 /*
6603 * Prevents stepping twice over the same instruction when the guest is stepping using
6604 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
6605 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
6606 */
6607 if ( rc == VINF_EM_DBG_STEPPED
6608 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
6609 {
6610 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6611 rc = VINF_EM_RAW_GUEST_TRAP;
6612 }
6613 }
6614 else
6615 rc = VINF_EM_RAW_GUEST_TRAP;
6616 Log6Func(("rc=%Rrc\n", rc));
6617 if (rc == VINF_EM_RAW_GUEST_TRAP)
6618 {
6619 /*
6620 * The exception was for the guest. Update DR6, DR7.GD and
6621 * IA32_DEBUGCTL.LBR before forwarding it.
6622 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
6623 */
6624#ifndef IN_NEM_DARWIN
6625 VMMRZCallRing3Disable(pVCpu);
6626 HM_DISABLE_PREEMPT(pVCpu);
6627
6628 pCtx->dr[6] &= ~X86_DR6_B_MASK;
6629 pCtx->dr[6] |= uDR6;
6630 if (CPUMIsGuestDebugStateActive(pVCpu))
6631 ASMSetDR6(pCtx->dr[6]);
6632
6633 HM_RESTORE_PREEMPT();
6634 VMMRZCallRing3Enable(pVCpu);
6635#else
6636 /** @todo */
6637#endif
6638
6639 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7);
6640 AssertRCReturn(rc, rc);
6641
6642 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
6643 pCtx->dr[7] &= ~(uint64_t)X86_DR7_GD;
6644
6645 /* Paranoia. */
6646 pCtx->dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
6647 pCtx->dr[7] |= X86_DR7_RA1_MASK;
6648
6649 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
6650 AssertRC(rc);
6651
6652 /*
6653 * Raise #DB in the guest.
6654 *
6655 * It is important to reflect exactly what the VM-exit gave us (preserving the
6656 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
6657 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
6658 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
6659 *
 6660 * Intel re-documented ICEBP/INT1 in May 2018; it was previously only documented as
 6661 * part of the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
6662 */
6663 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6664 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6665 return VINF_SUCCESS;
6666 }
6667
6668 /*
6669 * Not a guest trap, must be a hypervisor related debug event then.
6670 * Update DR6 in case someone is interested in it.
6671 */
6672 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
6673 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
6674 CPUMSetHyperDR6(pVCpu, uDR6);
6675
6676 return rc;
6677}
6678
6679
6680/**
6681 * Hacks its way around the lovely mesa driver's backdoor accesses.
6682 *
6683 * @sa hmR0SvmHandleMesaDrvGp.
6684 */
6685static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6686{
6687 LogFunc(("cs:rip=%#04x:%#RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
6688 RT_NOREF(pCtx);
6689
6690 /* For now we'll just skip the instruction. */
6691 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6692}
6693
6694
6695/**
 6696 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
6697 * backdoor logging w/o checking what it is running inside.
6698 *
6699 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
6700 * backdoor port and magic numbers loaded in registers.
6701 *
6702 * @returns true if it is, false if it isn't.
6703 * @sa hmR0SvmIsMesaDrvGp.
6704 */
6705DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6706{
6707 /* 0xed: IN eAX,dx */
6708 uint8_t abInstr[1];
6709 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
6710 return false;
6711
6712 /* Check that it is #GP(0). */
6713 if (pVmxTransient->uExitIntErrorCode != 0)
6714 return false;
6715
6716 /* Check magic and port. */
6717 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
6718 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
6719 if (pCtx->rax != UINT32_C(0x564d5868))
6720 return false;
6721 if (pCtx->dx != UINT32_C(0x5658))
6722 return false;
6723
6724 /* Flat ring-3 CS. */
6725 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
6726 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
6727 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
6728 if (pCtx->cs.Attr.n.u2Dpl != 3)
6729 return false;
6730 if (pCtx->cs.u64Base != 0)
6731 return false;
6732
6733 /* Check opcode. */
6734 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
6735 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
6736 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
6737 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
6738 if (RT_FAILURE(rc))
6739 return false;
6740 if (abInstr[0] != 0xed)
6741 return false;
6742
6743 return true;
6744}
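
/*
 * For reference: the pattern recognized above corresponds to the VMware backdoor protocol
 * the Mesa driver uses for logging, i.e. the guest loads EAX with the 0x564d5868 ('VMXh')
 * magic and DX with the 0x5658 ('VX') backdoor I/O port and executes "IN EAX, DX" (opcode
 * 0xED) from a flat ring-3 code segment; the IN then faults with #GP(0) when I/O access
 * is not permitted, which is what lands us in vmxHCExitXcptGP() below.
 */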
6745
6746
6747/**
6748 * VM-exit exception handler for \#GP (General-protection exception).
6749 *
6750 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6751 */
6752static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6753{
6754 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6755 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
6756
6757 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6758 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6759#ifndef IN_NEM_DARWIN
6760 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
6761 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
6762 { /* likely */ }
6763 else
6764#endif
6765 {
6766#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6767# ifndef IN_NEM_DARWIN
6768 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6769# else
6770 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6771# endif
6772#endif
6773 /*
6774 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
6775 * executing a nested-guest, reflect #GP to the guest or nested-guest.
6776 */
6777 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6778 AssertRCReturn(rc, rc);
6779 Log4Func(("Gst: cs:rip=%#04x:%#RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
6780 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
6781
6782 if ( pVmxTransient->fIsNestedGuest
6783 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
6784 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
6785 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6786 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6787 else
6788 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
6789 return rc;
6790 }
6791
6792#ifndef IN_NEM_DARWIN
6793 Assert(CPUMIsGuestInRealModeEx(pCtx));
6794 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
6795 Assert(!pVmxTransient->fIsNestedGuest);
6796
6797 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6798 AssertRCReturn(rc, rc);
6799
6800 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
6801 if (rcStrict == VINF_SUCCESS)
6802 {
6803 if (!CPUMIsGuestInRealModeEx(pCtx))
6804 {
6805 /*
6806 * The guest is no longer in real-mode, check if we can continue executing the
6807 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
6808 */
6809 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
6810 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
6811 {
6812 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
6813 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6814 }
6815 else
6816 {
6817 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
6818 rcStrict = VINF_EM_RESCHEDULE;
6819 }
6820 }
6821 else
6822 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6823 }
6824 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6825 {
6826 rcStrict = VINF_SUCCESS;
6827 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6828 }
6829 return VBOXSTRICTRC_VAL(rcStrict);
6830#endif
6831}
6832
6833
6834/**
6835 * VM-exit exception handler wrapper for all other exceptions that are not handled
6836 * by a specific handler.
6837 *
6838 * This simply re-injects the exception back into the VM without any special
6839 * processing.
6840 *
6841 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6842 */
6843static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6844{
6845 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6846
6847#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6848# ifndef IN_NEM_DARWIN
6849 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6850 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
6851 ("uVector=%#x u32XcptBitmap=%#X32\n",
6852 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
6853 NOREF(pVmcsInfo);
6854# endif
6855#endif
6856
6857 /*
6858 * Re-inject the exception into the guest. This cannot be a double-fault condition which
6859 * would have been handled while checking exits due to event delivery.
6860 */
6861 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6862
6863#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6864 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6865 AssertRCReturn(rc, rc);
6866 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6867#endif
6868
6869#ifdef VBOX_WITH_STATISTICS
6870 switch (uVector)
6871 {
6872 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
6873 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
6874 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
6875 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6876 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
6877 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
6878 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6879 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
6880 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
6881 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
6882 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
6883 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
6884 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
6885 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
6886 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
6887 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
6888 default:
6889 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
6890 break;
6891 }
6892#endif
6893
6894 /* We should never call this function for a page-fault, we'd need to pass on the fault address below otherwise. */
6895 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
6896 NOREF(uVector);
6897
6898 /* Re-inject the original exception into the guest. */
6899 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6900 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6901 return VINF_SUCCESS;
6902}
6903
6904
6905/**
6906 * VM-exit exception handler for all exceptions (except NMIs!).
6907 *
6908 * @remarks This may be called for both guests and nested-guests. Take care to not
6909 * make assumptions and avoid doing anything that is not relevant when
6910 * executing a nested-guest (e.g., Mesa driver hacks).
6911 */
6912static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6913{
6914 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6915
6916 /*
6917 * If this VM-exit occurred while delivering an event through the guest IDT, take
6918 * action based on the return code and additional hints (e.g. for page-faults)
6919 * that will be updated in the VMX transient structure.
6920 */
6921 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
6922 if (rcStrict == VINF_SUCCESS)
6923 {
6924 /*
6925 * If an exception caused a VM-exit due to delivery of an event, the original
6926 * event may have to be re-injected into the guest. We shall reinject it and
6927 * continue guest execution. However, page-fault is a complicated case and
6928 * needs additional processing done in vmxHCExitXcptPF().
6929 */
6930 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
6931 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6932 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
6933 || uVector == X86_XCPT_PF)
6934 {
6935 switch (uVector)
6936 {
6937 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
6938 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
6939 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
6940 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
6941 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
6942 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
6943 default:
6944 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
6945 }
6946 }
6947 /* else: inject pending event before resuming guest execution. */
6948 }
6949 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
6950 {
6951 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6952 rcStrict = VINF_SUCCESS;
6953 }
6954
6955 return rcStrict;
6956}
6957/** @} */
6958
6959
6960/** @name VM-exit handlers.
6961 * @{
6962 */
6963/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6964/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6965/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6966
6967/**
6968 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
6969 */
6970HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6971{
6972 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6973 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
6974
6975#ifndef IN_NEM_DARWIN
6976 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
6977 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
6978 return VINF_SUCCESS;
6979 return VINF_EM_RAW_INTERRUPT;
6980#else
6981 return VINF_SUCCESS;
6982#endif
6983}
6984
6985
6986/**
6987 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
6988 * VM-exit.
6989 */
6990HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6991{
6992 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6993 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
6994
6995 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
6996
6997 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
6998 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6999 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7000
7001 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7002 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7003 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7004 NOREF(pVmcsInfo);
7005
7006 VBOXSTRICTRC rcStrict;
7007 switch (uExitIntType)
7008 {
7009#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7010 /*
7011 * Host physical NMIs:
7012 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7013 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7014 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7015 *
7016 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7017 * See Intel spec. 27.5.5 "Updating Non-Register State".
7018 */
7019 case VMX_EXIT_INT_INFO_TYPE_NMI:
7020 {
7021 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7022 break;
7023 }
7024#endif
7025
7026 /*
7027 * Privileged software exceptions (#DB from ICEBP),
7028 * Software exceptions (#BP and #OF),
7029 * Hardware exceptions:
7030 * Process the required exceptions and resume guest execution if possible.
7031 */
7032 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7033 Assert(uVector == X86_XCPT_DB);
7034 RT_FALL_THRU();
7035 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7036 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7037 RT_FALL_THRU();
7038 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7039 {
7040 NOREF(uVector);
7041 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
7042 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7043 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
7044 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
7045
7046 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7047 break;
7048 }
7049
7050 default:
7051 {
7052 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7053 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7054 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7055 break;
7056 }
7057 }
7058
7059 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7060 return rcStrict;
7061}
7062
7063
7064/**
7065 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7066 */
7067HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7068{
7069 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7070
 7071 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7072 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7073 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7074
7075 /* Evaluate and deliver pending events and resume guest execution. */
7076 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7077 return VINF_SUCCESS;
7078}
7079
7080
7081/**
7082 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7083 */
7084HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7085{
7086 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7087
7088 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7089 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7090 {
7091 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7092 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7093 }
7094
7095 Assert(!CPUMIsGuestNmiBlocking(pVCpu));
7096
7097 /*
7098 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7099 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7100 */
7101 uint32_t fIntrState;
7102 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7103 AssertRC(rc);
7104 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7105 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7106 {
7107 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
7108 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
7109
7110 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7111 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7112 AssertRC(rc);
7113 }
7114
7115    /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7116 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7117
7118 /* Evaluate and deliver pending events and resume guest execution. */
7119 return VINF_SUCCESS;
7120}
7121
7122
7123/**
7124 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7125 */
7126HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7127{
7128 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7129 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7130}
7131
7132
7133/**
7134 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7135 */
7136HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7137{
7138 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7139 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7140}
7141
7142
7143/**
7144 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7145 */
7146HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7147{
7148 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7149
7150 /*
7151 * Get the state we need and update the exit history entry.
7152 */
7153 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7154 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7155
7156 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7157 AssertRCReturn(rc, rc);
7158
7159 VBOXSTRICTRC rcStrict;
7160 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7161 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7162 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7163 if (!pExitRec)
7164 {
7165 /*
7166 * Regular CPUID instruction execution.
7167 */
7168 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7169 if (rcStrict == VINF_SUCCESS)
7170 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7171 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7172 {
7173 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7174 rcStrict = VINF_SUCCESS;
7175 }
7176 }
7177 else
7178 {
7179 /*
7180 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7181 */
7182 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7183 AssertRCReturn(rc2, rc2);
7184
7185 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7186 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7187
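            /* EMHistoryExec may execute more than one instruction when probing a frequent exit,
               so be conservative and flag all guest state as modified below. */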
7188 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7189 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7190
7191 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7192 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7193 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7194 }
7195 return rcStrict;
7196}
7197
7198
7199/**
7200 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7201 */
7202HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7203{
7204 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7205
7206 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7207 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4);
7208 AssertRCReturn(rc, rc);
7209
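        /* GETSEC is only valid when CR4.SMXE is set (with SMXE clear the CPU raises #UD rather
           than VM-exiting, which is why that case is treated as unexpected below). We do not
           implement SMX, so hand the instruction to the interpreter for full emulation. */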
7210 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7211 return VINF_EM_RAW_EMULATE_INSTR;
7212
7213 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7214 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7215}
7216
7217
7218/**
7219 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7220 */
7221HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7222{
7223 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7224
7225 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7226 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7227 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
7228 AssertRCReturn(rc, rc);
7229
7230 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7231 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7232 {
7233 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7234 we must reset offsetting on VM-entry. See @bugref{6634}. */
7235 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7236 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7237 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7238 }
7239 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7240 {
7241 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7242 rcStrict = VINF_SUCCESS;
7243 }
7244 return rcStrict;
7245}
7246
7247
7248/**
7249 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7250 */
7251HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7252{
7253 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7254
7255 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7256 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7257 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
7258 AssertRCReturn(rc, rc);
7259
7260 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7261 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7262 {
7263 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7264 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7265 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7266 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7267 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7268 }
7269 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7270 {
7271 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7272 rcStrict = VINF_SUCCESS;
7273 }
7274 return rcStrict;
7275}
7276
7277
7278/**
7279 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7280 */
7281HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7282{
7283 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7284
7285 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7286 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0
7287 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
7288 AssertRCReturn(rc, rc);
7289
7290 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7291 rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
7292 if (RT_LIKELY(rc == VINF_SUCCESS))
7293 {
7294 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
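            /* RDPMC is a fixed 2-byte instruction (opcode 0F 33). */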
7295 Assert(pVmxTransient->cbExitInstr == 2);
7296 }
7297 else
7298 {
7299 AssertMsgFailed(("vmxHCExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
7300 rc = VERR_EM_INTERPRETER;
7301 }
7302 return rc;
7303}
7304
7305
7306/**
7307 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7308 */
7309HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7310{
7311 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7312
7313 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7314 if (EMAreHypercallInstructionsEnabled(pVCpu))
7315 {
7316 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7317 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
7318 | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
7319 AssertRCReturn(rc, rc);
7320
7321 /* Perform the hypercall. */
7322 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7323 if (rcStrict == VINF_SUCCESS)
7324 {
7325 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7326 AssertRCReturn(rc, rc);
7327 }
7328 else
7329 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7330 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7331 || RT_FAILURE(rcStrict));
7332
7333 /* If the hypercall changes anything other than guest's general-purpose registers,
7334 we would need to reload the guest changed bits here before VM-entry. */
7335 }
7336 else
7337 Log4Func(("Hypercalls not enabled\n"));
7338
7339 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7340 if (RT_FAILURE(rcStrict))
7341 {
7342 vmxHCSetPendingXcptUD(pVCpu);
7343 rcStrict = VINF_SUCCESS;
7344 }
7345
7346 return rcStrict;
7347}
7348
7349
7350/**
7351 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7352 */
7353HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7354{
7355 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7356#ifndef IN_NEM_DARWIN
7357 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7358#endif
7359
7360 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7361 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7362 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7363 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7364 AssertRCReturn(rc, rc);
7365
7366 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7367
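        /* VINF_PGM_SYNC_CR3 merely means the shadow page tables need resyncing before re-entry;
           the instruction itself has completed, so update the RIP/RFLAGS state either way. */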
7368 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7369 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7370 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7371 {
7372 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7373 rcStrict = VINF_SUCCESS;
7374 }
7375 else
7376 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7377 VBOXSTRICTRC_VAL(rcStrict)));
7378 return rcStrict;
7379}
7380
7381
7382/**
7383 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7384 */
7385HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7386{
7387 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7388
7389 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7390 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7391 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
7392 AssertRCReturn(rc, rc);
7393
7394 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7395 if (rcStrict == VINF_SUCCESS)
7396 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7397 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7398 {
7399 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7400 rcStrict = VINF_SUCCESS;
7401 }
7402
7403 return rcStrict;
7404}
7405
7406
7407/**
7408 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7409 */
7410HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7411{
7412 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7413
7414 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7415 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7416 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7417 AssertRCReturn(rc, rc);
7418
7419 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
7420 if (RT_SUCCESS(rcStrict))
7421 {
7422 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
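            /* Check with EM whether the MWAIT really needs to halt the vCPU; if execution should
               continue, override any halt status with VINF_SUCCESS. */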
7423 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7424 rcStrict = VINF_SUCCESS;
7425 }
7426
7427 return rcStrict;
7428}
7429
7430
7431/**
7432 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7433 * VM-exit.
7434 */
7435HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7436{
7437 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7438 return VINF_EM_RESET;
7439}
7440
7441
7442/**
7443 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7444 */
7445HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7446{
7447 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7448
7449 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7450 AssertRCReturn(rc, rc);
7451
7452 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
7453 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
7454 rc = VINF_SUCCESS;
7455 else
7456 rc = VINF_EM_HALT;
7457
7458 if (rc != VINF_SUCCESS)
7459 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
7460 return rc;
7461}
7462
7463
7464/**
7465 * VM-exit handler for instructions that result in a \#UD exception delivered to
7466 * the guest.
7467 */
7468HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7469{
7470 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7471 vmxHCSetPendingXcptUD(pVCpu);
7472 return VINF_SUCCESS;
7473}
7474
7475
7476/**
7477 * VM-exit handler for expiry of the VMX-preemption timer.
7478 */
7479HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7480{
7481 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7482
7483 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
7484 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7485    Log12(("vmxHCExitPreemptTimer:\n"));
7486
7487 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
7488 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7489 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
7490 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
7491 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
7492}
7493
7494
7495/**
7496 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
7497 */
7498HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7499{
7500 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7501
7502 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7503 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7504 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
7505 AssertRCReturn(rc, rc);
7506
7507 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
7508 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
7509 : HM_CHANGED_RAISED_XCPT_MASK);
7510
7511#ifndef IN_NEM_DARWIN
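        /* The guest may have changed XCR0: re-evaluate whether it differs from the host value and
           must be swapped around VM-entry/exit, and update the VM-start function accordingly. */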
7512 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7513 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
7514 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
7515 {
7516 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
7517 hmR0VmxUpdateStartVmFunction(pVCpu);
7518 }
7519#endif
7520
7521 return rcStrict;
7522}
7523
7524
7525/**
7526 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
7527 */
7528HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7529{
7530 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7531
7532    /** @todo Enable the new code after finding a reliable guest test-case. */
7533#if 1
7534 return VERR_EM_INTERPRETER;
7535#else
7536 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7537 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
7538 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7539 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
7540 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7541 AssertRCReturn(rc, rc);
7542
7543 /* Paranoia. Ensure this has a memory operand. */
7544 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
7545
7546 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
7547 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
7548 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
7549 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
7550
7551 RTGCPTR GCPtrDesc;
7552 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
7553
7554 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
7555 GCPtrDesc, uType);
7556 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7557 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7558 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7559 {
7560 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7561 rcStrict = VINF_SUCCESS;
7562 }
7563 return rcStrict;
7564#endif
7565}
7566
7567
7568/**
7569 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
7570 * VM-exit.
7571 */
7572HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7573{
7574 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7575 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7576 AssertRCReturn(rc, rc);
7577
7578 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
7579 if (RT_FAILURE(rc))
7580 return rc;
7581
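        /* Re-run our own guest-state sanity checks to narrow down which VM-entry check failed;
           the result is only used for diagnostics/logging. */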
7582 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
7583 NOREF(uInvalidReason);
7584
7585#ifdef VBOX_STRICT
7586 uint32_t fIntrState;
7587 uint64_t u64Val;
7588 vmxHCReadEntryIntInfoVmcs(pVCpu, pVmxTransient);
7589 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
7590 vmxHCReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
7591
7592 Log4(("uInvalidReason %u\n", uInvalidReason));
7593 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
7594 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
7595 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
7596
7597 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
7598 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
7599 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
7600 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
7601 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
7602 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
7603 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
7604    Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
7605 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
7606 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
7607 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
7608 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
7609# ifndef IN_NEM_DARWIN
7610 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
7611 {
7612 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
7613 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
7614 }
7615
7616 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
7617# endif
7618#endif
7619
7620 return VERR_VMX_INVALID_GUEST_STATE;
7621}
7622
7623/**
7624 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
7625 */
7626HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7627{
7628 /*
7629 * Cumulative notes of all recognized but unexpected VM-exits.
7630 *
7631 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
7632 * nested-paging is used.
7633 *
7634     * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
7635 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
7636 * this function (and thereby stop VM execution) for handling such instructions.
7637 *
7638 *
7639 * VMX_EXIT_INIT_SIGNAL:
7640 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
7641     * They are -NOT- blocked in VMX non-root operation, so we can, in theory, still get these
7642     * VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
7643 *
7644     * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
7645 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
7646 * See Intel spec. "23.8 Restrictions on VMX operation".
7647 *
7648 * VMX_EXIT_SIPI:
7649 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
7650 * activity state is used. We don't make use of it as our guests don't have direct
7651 * access to the host local APIC.
7652 *
7653 * See Intel spec. 25.3 "Other Causes of VM-exits".
7654 *
7655 * VMX_EXIT_IO_SMI:
7656 * VMX_EXIT_SMI:
7657 * This can only happen if we support dual-monitor treatment of SMI, which can be
7658 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
7659 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
7660 * VMX root mode or receive an SMI. If we get here, something funny is going on.
7661 *
7662 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
7663 * See Intel spec. 25.3 "Other Causes of VM-Exits"
7664 *
7665 * VMX_EXIT_ERR_MSR_LOAD:
7666     * Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
7667     * and typically indicate a bug in the hypervisor code. We thus cannot resume
7668     * execution.
7669 *
7670 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
7671 *
7672 * VMX_EXIT_ERR_MACHINE_CHECK:
7673     * Machine-check exceptions indicate a fatal/unrecoverable hardware condition
7674     * including, but not limited to, system bus, ECC, parity, cache and TLB errors. An
7675     * abort-class #MC exception is raised. We thus cannot assume a reasonable chance of
7676     * continuing any sort of execution, so we bail.
7677 *
7678 * See Intel spec. 15.1 "Machine-check Architecture".
7679 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
7680 *
7681 * VMX_EXIT_PML_FULL:
7682 * VMX_EXIT_VIRTUALIZED_EOI:
7683 * VMX_EXIT_APIC_WRITE:
7684 * We do not currently support any of these features and thus they are all unexpected
7685 * VM-exits.
7686 *
7687 * VMX_EXIT_GDTR_IDTR_ACCESS:
7688 * VMX_EXIT_LDTR_TR_ACCESS:
7689 * VMX_EXIT_RDRAND:
7690 * VMX_EXIT_RSM:
7691 * VMX_EXIT_VMFUNC:
7692 * VMX_EXIT_ENCLS:
7693 * VMX_EXIT_RDSEED:
7694 * VMX_EXIT_XSAVES:
7695 * VMX_EXIT_XRSTORS:
7696 * VMX_EXIT_UMWAIT:
7697 * VMX_EXIT_TPAUSE:
7698 * VMX_EXIT_LOADIWKEY:
7699 * These VM-exits are -not- caused unconditionally by execution of the corresponding
7700     * instruction. Any VM-exit for these instructions indicates a hardware problem,
7701 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
7702 *
7703 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
7704 */
7705 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7706 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
7707 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7708}
7709
7710
7711/**
7712 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
7713 */
7714HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7715{
7716 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7717
7718 /** @todo Optimize this: We currently drag in the whole MSR state
7719     * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only fetch the
7720     * MSRs that are required. That would require changes to IEM and possibly CPUM too.
7721     * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7722 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7723 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7724 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
7725 switch (idMsr)
7726 {
7727 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7728 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7729 }
7730
7731 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7732 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7733 AssertRCReturn(rc, rc);
7734
7735 Log4Func(("ecx=%#RX32\n", idMsr));
7736
7737#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7738 Assert(!pVmxTransient->fIsNestedGuest);
7739 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
7740 {
7741 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
7742 && idMsr != MSR_K6_EFER)
7743 {
7744 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
7745 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7746 }
7747 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7748 {
7749 Assert(pVmcsInfo->pvMsrBitmap);
7750 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7751 if (fMsrpm & VMXMSRPM_ALLOW_RD)
7752 {
7753 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
7754 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7755 }
7756 }
7757 }
7758#endif
7759
7760 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
7761 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
7762 if (rcStrict == VINF_SUCCESS)
7763 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7764 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7765 {
7766 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7767 rcStrict = VINF_SUCCESS;
7768 }
7769 else
7770 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
7771 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7772
7773 return rcStrict;
7774}
7775
7776
7777/**
7778 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
7779 */
7780HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7781{
7782 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7783
7784 /** @todo Optimize this: We currently drag in the whole MSR state
7785     * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only fetch the
7786     * MSRs that are required. That would require changes to IEM and possibly CPUM too.
7787     * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7788 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7789 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
7790
7791 /*
7792 * The FS and GS base MSRs are not part of the above all-MSRs mask.
7793     * Although we don't need to fetch the base itself (it will be overwritten shortly),
7794     * loading the guest state would also load the entire segment register, including its
7795     * limit and attributes, and thus we need to import them here.
7796 */
7797 switch (idMsr)
7798 {
7799 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7800 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7801 }
7802
7803 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7804 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7805 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7806 AssertRCReturn(rc, rc);
7807
7808 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
7809
7810 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
7811 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
7812
7813 if (rcStrict == VINF_SUCCESS)
7814 {
7815 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7816
7817 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
7818 if ( idMsr == MSR_IA32_APICBASE
7819 || ( idMsr >= MSR_IA32_X2APIC_START
7820 && idMsr <= MSR_IA32_X2APIC_END))
7821 {
7822 /*
7823 * We've already saved the APIC related guest-state (TPR) in post-run phase.
7824 * When full APIC register virtualization is implemented we'll have to make
7825 * sure APIC state is saved from the VMCS before IEM changes it.
7826 */
7827 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7828 }
7829 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
7830 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7831 else if (idMsr == MSR_K6_EFER)
7832 {
7833 /*
7834 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
7835 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
7836 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
7837 */
7838 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
7839 }
7840
7841 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
7842 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
7843 {
7844 switch (idMsr)
7845 {
7846 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
7847 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
7848 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
7849 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
7850 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
7851 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
7852 default:
7853 {
7854#ifndef IN_NEM_DARWIN
7855 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7856 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
7857 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7858 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
7859#else
7860 AssertMsgFailed(("TODO\n"));
7861#endif
7862 break;
7863 }
7864 }
7865 }
7866#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7867 else
7868 {
7869 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
7870 switch (idMsr)
7871 {
7872 case MSR_IA32_SYSENTER_CS:
7873 case MSR_IA32_SYSENTER_EIP:
7874 case MSR_IA32_SYSENTER_ESP:
7875 case MSR_K8_FS_BASE:
7876 case MSR_K8_GS_BASE:
7877 {
7878 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
7879 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7880 }
7881
7882 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
7883 default:
7884 {
7885 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7886 {
7887 /* EFER MSR writes are always intercepted. */
7888 if (idMsr != MSR_K6_EFER)
7889 {
7890 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
7891 idMsr));
7892 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7893 }
7894 }
7895
7896 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7897 {
7898 Assert(pVmcsInfo->pvMsrBitmap);
7899 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7900 if (fMsrpm & VMXMSRPM_ALLOW_WR)
7901 {
7902 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
7903 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7904 }
7905 }
7906 break;
7907 }
7908 }
7909 }
7910#endif /* VBOX_STRICT */
7911 }
7912 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7913 {
7914 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7915 rcStrict = VINF_SUCCESS;
7916 }
7917 else
7918 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
7919 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7920
7921 return rcStrict;
7922}
7923
7924
7925/**
7926 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
7927 */
7928HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7929{
7930 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7931
7932 /** @todo The guest has likely hit a contended spinlock. We might want to
7933     * poke or schedule a different guest VCPU. */
7934 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7935 if (RT_SUCCESS(rc))
7936 return VINF_EM_RAW_INTERRUPT;
7937
7938 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
7939 return rc;
7940}
7941
7942
7943/**
7944 * VM-exit handler for when the TPR value is lowered below the specified
7945 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
7946 */
7947HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7948{
7949 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7950 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
7951
7952 /*
7953 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
7954 * We'll re-evaluate pending interrupts and inject them before the next VM
7955 * entry so we can just continue execution here.
7956 */
7957 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
7958 return VINF_SUCCESS;
7959}
7960
7961
7962/**
7963 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
7964 * VM-exit.
7965 *
7966 * @retval VINF_SUCCESS when guest execution can continue.
7967 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
7968 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
7969 * incompatible guest state for VMX execution (real-on-v86 case).
7970 */
7971HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7972{
7973 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7974 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
7975
7976 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7977 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7978 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7979
7980 VBOXSTRICTRC rcStrict;
7981 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7982 uint64_t const uExitQual = pVmxTransient->uExitQual;
7983 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
7984 switch (uAccessType)
7985 {
7986 /*
7987 * MOV to CRx.
7988 */
7989 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
7990 {
7991 /*
7992 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
7993 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
7994 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
7995 * PAE PDPTEs as well.
7996 */
7997 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
7998 AssertRCReturn(rc, rc);
7999
8000 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8001#ifndef IN_NEM_DARWIN
8002 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8003#endif
8004 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8005 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8006
8007 /*
8008             * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8009 * - When nested paging isn't used.
8010 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8011 * - We are executing in the VM debug loop.
8012 */
8013#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8014# ifndef IN_NEM_DARWIN
8015 Assert( iCrReg != 3
8016 || !VM_IS_VMX_NESTED_PAGING(pVM)
8017 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8018 || pVCpu->hmr0.s.fUsingDebugLoop);
8019# else
8020 Assert( iCrReg != 3
8021 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8022# endif
8023#endif
8024
8025 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8026 Assert( iCrReg != 8
8027 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8028
8029 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8030 AssertMsg( rcStrict == VINF_SUCCESS
8031 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8032
8033#ifndef IN_NEM_DARWIN
8034 /*
8035 * This is a kludge for handling switches back to real mode when we try to use
8036             * V86 mode to run real-mode code directly. The problem is that V86 mode cannot
8037             * deal with special selector values, so we have to return to ring-3 and run
8038             * there until the selector values are V86 mode compatible.
8039 *
8040 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8041 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8042 * this function.
8043 */
8044 if ( iCrReg == 0
8045 && rcStrict == VINF_SUCCESS
8046 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8047 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8048 && (uOldCr0 & X86_CR0_PE)
8049 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8050 {
8051 /** @todo Check selectors rather than returning all the time. */
8052 Assert(!pVmxTransient->fIsNestedGuest);
8053 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8054 rcStrict = VINF_EM_RESCHEDULE_REM;
8055 }
8056#endif
8057
8058 break;
8059 }
8060
8061 /*
8062 * MOV from CRx.
8063 */
8064 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8065 {
8066 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8067 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8068
8069 /*
8070             * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8071 * - When nested paging isn't used.
8072 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8073 * - We are executing in the VM debug loop.
8074 */
8075#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8076# ifndef IN_NEM_DARWIN
8077 Assert( iCrReg != 3
8078 || !VM_IS_VMX_NESTED_PAGING(pVM)
8079 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8080 || pVCpu->hmr0.s.fLeaveDone);
8081# else
8082 Assert( iCrReg != 3
8083 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8084# endif
8085#endif
8086
8087 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8088 Assert( iCrReg != 8
8089 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8090
8091 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8092 break;
8093 }
8094
8095 /*
8096 * CLTS (Clear Task-Switch Flag in CR0).
8097 */
8098 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8099 {
8100 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8101 break;
8102 }
8103
8104 /*
8105 * LMSW (Load Machine-Status Word into CR0).
8106 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8107 */
8108 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8109 {
8110 RTGCPTR GCPtrEffDst;
8111 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8112 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8113 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8114 if (fMemOperand)
8115 {
8116 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
8117 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8118 }
8119 else
8120 GCPtrEffDst = NIL_RTGCPTR;
8121 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8122 break;
8123 }
8124
8125 default:
8126 {
8127 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8128 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8129 }
8130 }
8131
8132 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8133 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8134 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8135
8136 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8137 NOREF(pVM);
8138 return rcStrict;
8139}
8140
8141
8142/**
8143 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8144 * VM-exit.
8145 */
8146HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8147{
8148 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8149 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8150
8151 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8152 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8153 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8154 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8155 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK
8156 | CPUMCTX_EXTRN_EFER);
8157 /* EFER MSR also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8158 AssertRCReturn(rc, rc);
8159
8160 /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
8161 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8162 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8163 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8164 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8165 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8166 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
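        /* The exit qualification size field encodes 0 = 1 byte, 1 = 2 bytes and 3 = 4 bytes;
           2 is not a valid encoding, hence the lookup tables below and this assertion. */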
8167 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8168
8169 /*
8170 * Update exit history to see if this exit can be optimized.
8171 */
8172 VBOXSTRICTRC rcStrict;
8173 PCEMEXITREC pExitRec = NULL;
8174 if ( !fGstStepping
8175 && !fDbgStepping)
8176 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8177 !fIOString
8178 ? !fIOWrite
8179 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8180 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8181 : !fIOWrite
8182 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8183 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8184 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8185 if (!pExitRec)
8186 {
8187 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8188 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
8189
8190 uint32_t const cbValue = s_aIOSizes[uIOSize];
8191 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8192 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8193 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8194 if (fIOString)
8195 {
8196 /*
8197 * INS/OUTS - I/O String instruction.
8198 *
8199 * Use instruction-information if available, otherwise fall back on
8200 * interpreting the instruction.
8201 */
8202 Log4Func(("cs:rip=%#04x:%#RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8203 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8204 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8205 if (fInsOutsInfo)
8206 {
8207 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8208 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8209 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8210 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8211 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8212 if (fIOWrite)
8213 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8214 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8215 else
8216 {
8217 /*
8218 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8219                 * Hence the "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8220 * See Intel Instruction spec. for "INS".
8221 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8222 */
8223 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8224 }
8225 }
8226 else
8227 rcStrict = IEMExecOne(pVCpu);
8228
8229 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8230 fUpdateRipAlready = true;
8231 }
8232 else
8233 {
8234 /*
8235 * IN/OUT - I/O instruction.
8236 */
8237 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8238 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8239 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8240 if (fIOWrite)
8241 {
8242 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8243 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8244#ifndef IN_NEM_DARWIN
8245 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8246 && !pCtx->eflags.Bits.u1TF)
8247 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8248#endif
8249 }
8250 else
8251 {
8252 uint32_t u32Result = 0;
8253 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8254 if (IOM_SUCCESS(rcStrict))
8255 {
8256 /* Save result of I/O IN instr. in AL/AX/EAX. */
8257 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8258 }
8259#ifndef IN_NEM_DARWIN
8260 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8261 && !pCtx->eflags.Bits.u1TF)
8262 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8263#endif
8264 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8265 }
8266 }
8267
8268 if (IOM_SUCCESS(rcStrict))
8269 {
8270 if (!fUpdateRipAlready)
8271 {
8272 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8273 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8274 }
8275
8276 /*
8277         * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault
8278         * guru meditation while booting a Fedora 17 64-bit guest.
8279 *
8280 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8281 */
8282 if (fIOString)
8283 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8284
8285 /*
8286 * If any I/O breakpoints are armed, we need to check if one triggered
8287 * and take appropriate action.
8288 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8289 */
8290 rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_DR7);
8291 AssertRCReturn(rc, rc);
8292
8293 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8294 * execution engines about whether hyper BPs and such are pending. */
8295 uint32_t const uDr7 = pCtx->dr[7];
8296 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8297 && X86_DR7_ANY_RW_IO(uDr7)
8298 && (pCtx->cr4 & X86_CR4_DE))
8299 || DBGFBpIsHwIoArmed(pVM)))
8300 {
8301 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8302
8303#ifndef IN_NEM_DARWIN
8304 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8305 VMMRZCallRing3Disable(pVCpu);
8306 HM_DISABLE_PREEMPT(pVCpu);
8307
8308 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8309
8310 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8311 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8312 {
8313 /* Raise #DB. */
8314 if (fIsGuestDbgActive)
8315 ASMSetDR6(pCtx->dr[6]);
8316 if (pCtx->dr[7] != uDr7)
8317 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8318
8319 vmxHCSetPendingXcptDB(pVCpu);
8320 }
8321 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8322 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8323 else if ( rcStrict2 != VINF_SUCCESS
8324 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8325 rcStrict = rcStrict2;
8326 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8327
8328 HM_RESTORE_PREEMPT();
8329 VMMRZCallRing3Enable(pVCpu);
8330#else
8331 /** @todo */
8332#endif
8333 }
8334 }
8335
8336#ifdef VBOX_STRICT
8337 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8338 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8339 Assert(!fIOWrite);
8340 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8341 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8342 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8343 Assert(fIOWrite);
8344 else
8345 {
8346# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8347 * statuses, that the VMM device and some others may return. See
8348 * IOM_SUCCESS() for guidance. */
8349 AssertMsg( RT_FAILURE(rcStrict)
8350 || rcStrict == VINF_SUCCESS
8351 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8352 || rcStrict == VINF_EM_DBG_BREAKPOINT
8353 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8354 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8355# endif
8356 }
8357#endif
8358 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8359 }
8360 else
8361 {
8362 /*
8363 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8364 */
8365 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8366 AssertRCReturn(rc2, rc2);
8367 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8368 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8369 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8370 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8371 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8372 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8373
8374 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8375 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8376
8377 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8378 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8379 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8380 }
8381 return rcStrict;
8382}
8383
8384
8385/**
8386 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8387 * VM-exit.
8388 */
8389HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8390{
8391 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8392
8393    /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8394 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8395 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
8396 {
8397 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8398 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
8399 {
8400 uint32_t uErrCode;
8401 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
8402 {
8403 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8404 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
8405 }
8406 else
8407 uErrCode = 0;
8408
8409 RTGCUINTPTR GCPtrFaultAddress;
8410 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
8411 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
8412 else
8413 GCPtrFaultAddress = 0;
8414
8415 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8416
8417 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
8418 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
8419
8420 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
8421 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
8422 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8423 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8424 }
8425 }
8426
8427 /* Fall back to the interpreter to emulate the task-switch. */
8428 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8429 return VERR_EM_INTERPRETER;
8430}
8431
8432
8433/**
8434 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
8435 */
8436HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8437{
8438 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8439
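        /* Disarm the monitor trap flag (typically armed for single-stepping) and report the
           completed step to the debugger (VINF_EM_DBG_STEPPED). */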
8440 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8441 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
8442 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8443 AssertRC(rc);
8444 return VINF_EM_DBG_STEPPED;
8445}
8446
8447
8448/**
8449 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
8450 */
8451HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8452{
8453 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8454 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
8455
8456 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8457 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8458 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8459 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8460 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8461
8462 /*
8463 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8464 */
8465 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8466 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8467 {
8468 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
8469 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
8470 {
8471 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8472 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8473 }
8474 }
8475 else
8476 {
8477 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8478 return rcStrict;
8479 }
8480
8481 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
8482 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8483 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8484 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8485 AssertRCReturn(rc, rc);
8486
8487    /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
8488 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
8489 switch (uAccessType)
8490 {
8491#ifndef IN_NEM_DARWIN
8492 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
8493 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
8494 {
8495 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
8496 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
8497 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
8498
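                /* Reconstruct the faulting guest-physical address: the APIC-access page base from
                   the guest's APIC-base MSR plus the offset reported in the exit qualification. */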
8499 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
8500 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
8501 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
8502 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
8503 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
8504
8505 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
8506 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
8507 Log4Func(("IOMMMIOPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8508 if ( rcStrict == VINF_SUCCESS
8509 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8510 || rcStrict == VERR_PAGE_NOT_PRESENT)
8511 {
8512 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8513 | HM_CHANGED_GUEST_APIC_TPR);
8514 rcStrict = VINF_SUCCESS;
8515 }
8516 break;
8517 }
8518#else
8519 /** @todo */
8520#endif
8521
8522 default:
8523 {
8524 Log4Func(("uAccessType=%#x\n", uAccessType));
8525 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
8526 break;
8527 }
8528 }
8529
8530 if (rcStrict != VINF_SUCCESS)
8531 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
8532 return rcStrict;
8533}
8534
8535
8536/**
8537 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
8538 * VM-exit.
8539 */
8540HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8541{
8542 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8543 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8544
8545 /*
8546 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
8547 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
8548 * must emulate the MOV DRx access.
8549 */
8550 if (!pVmxTransient->fIsNestedGuest)
8551 {
8552 /* We should -not- get this VM-exit if the guest's debug registers were active. */
8553 if (pVmxTransient->fWasGuestDebugStateActive)
8554 {
8555 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
8556 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8557 }
8558
8559 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
8560 && !pVmxTransient->fWasHyperDebugStateActive)
8561 {
8562 Assert(!DBGFIsStepping(pVCpu));
8563 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
8564
8565 /* Don't intercept MOV DRx any more. */
8566 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
8567 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8568 AssertRC(rc);
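/* From this point on MOV DRx no longer causes VM-exits; the guest debug state is loaded
   onto the CPU below so subsequent accesses hit the real debug registers directly. */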
8569
8570#ifndef IN_NEM_DARWIN
8571 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
8572 VMMRZCallRing3Disable(pVCpu);
8573 HM_DISABLE_PREEMPT(pVCpu);
8574
8575 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
8576 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
8577 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8578
8579 HM_RESTORE_PREEMPT();
8580 VMMRZCallRing3Enable(pVCpu);
8581#else
8582 CPUMR3NemActivateGuestDebugState(pVCpu);
8583 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8584 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
8585#endif
8586
8587#ifdef VBOX_WITH_STATISTICS
8588 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8589 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8590 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8591 else
8592 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8593#endif
8594 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
8595 return VINF_SUCCESS;
8596 }
8597 }
8598
8599 /*
8600 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires the EFER MSR and CS.
8601 * The EFER MSR is always up-to-date.
8602 * Update the segment registers and DR7 from the CPU.
8603 */
8604 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8605 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8606 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
8607 AssertRCReturn(rc, rc);
8608 Log4Func(("cs:rip=%#04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
8609
8610 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8611 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8612 {
8613 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8614 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
8615 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
8616 if (RT_SUCCESS(rc))
8617 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
8618 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8619 }
8620 else
8621 {
8622 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8623 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
8624 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
8625 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8626 }
8627
8628 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8629 if (RT_SUCCESS(rc))
8630 {
8631 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8632 AssertRCReturn(rc2, rc2);
8633 return VINF_SUCCESS;
8634 }
8635 return rc;
8636}
8637
8638
8639/**
8640 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
8641 * Conditional VM-exit.
8642 */
8643HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8644{
8645 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8646
8647#ifndef IN_NEM_DARWIN
8648 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8649
8650 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8651 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8652 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8653 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8654 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8655
8656 /*
8657 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8658 */
8659 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8660 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8661 {
8662 /*
8663 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
8664 * instruction emulation to inject the original event. Otherwise, injecting the original event
8665 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
8666 */
8667 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8668 { /* likely */ }
8669 else
8670 {
8671 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8672#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8673 /** @todo NSTVMX: Think about how this should be handled. */
8674 if (pVmxTransient->fIsNestedGuest)
8675 return VERR_VMX_IPE_3;
8676#endif
8677 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8678 }
8679 }
8680 else
8681 {
8682 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8683 return rcStrict;
8684 }
8685
8686 /*
8687 * Get sufficient state and update the exit history entry.
8688 */
8689 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8690 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8691 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8692 AssertRCReturn(rc, rc);
8693
8694 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8695 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8696 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
8697 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8698 if (!pExitRec)
8699 {
8700 /*
8701 * If we succeed, resume guest execution.
8702 * If we fail in interpreting the instruction because we couldn't get the guest physical address
8703 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
8704 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
8705 * weird case. See @bugref{6043}.
8706 */
8707 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8708 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8709/** @todo bird: We can probably just go straight to IOM here and assume that
8710 * it's MMIO, then fall back on PGM if that hunch didn't work out so
8711 * well. However, we need to address that aliasing workarounds that
8712 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
8713 *
8714 * Might also be interesting to see if we can get this done more or
8715 * less locklessly inside IOM. Need to consider the lookup table
8716 * updating and use a bit more carefully first (or do all updates via
8717 * rendezvous) */
8718 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
8719 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
8720 if ( rcStrict == VINF_SUCCESS
8721 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8722 || rcStrict == VERR_PAGE_NOT_PRESENT)
8723 {
8724 /* Successfully handled MMIO operation. */
8725 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8726 | HM_CHANGED_GUEST_APIC_TPR);
8727 rcStrict = VINF_SUCCESS;
8728 }
8729 }
8730 else
8731 {
8732 /*
8733 * Frequent exit or something needing probing. Call EMHistoryExec.
8734 */
8735 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
8736 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
8737
8738 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8739 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8740
8741 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8742 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8743 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8744 }
8745 return rcStrict;
8746#else
8747 AssertFailed();
8748 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
8749#endif
8750}
8751
8752
8753/**
8754 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
8755 * VM-exit.
8756 */
8757HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8758{
8759 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8760#ifndef IN_NEM_DARWIN
8761 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8762
8763 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8764 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8765 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8766 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8767 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8768 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8769
8770 /*
8771 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8772 */
8773 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8774 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8775 {
8776 /*
8777 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
8778 * we shall resolve the nested #PF and re-inject the original event.
8779 */
8780 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8781 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
8782 }
8783 else
8784 {
8785 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8786 return rcStrict;
8787 }
8788
8789 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8790 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8791 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8792 AssertRCReturn(rc, rc);
8793
8794 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8795 uint64_t const uExitQual = pVmxTransient->uExitQual;
8796 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
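/* Bits 7:8 of the Exit Qualification describe guest linear-address validity; the
   combination asserted against above (bit 7 clear, bit 8 set) is presumably never
   produced by hardware as the SDM leaves bit 8 undefined when bit 7 is clear. */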
8797
8798 RTGCUINT uErrorCode = 0;
8799 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
8800 uErrorCode |= X86_TRAP_PF_ID;
8801 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8802 uErrorCode |= X86_TRAP_PF_RW;
8803 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
8804 uErrorCode |= X86_TRAP_PF_P;
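/* Illustrative mapping: a guest write to a GPA whose EPT entry is present but read-only
   yields uErrorCode == (X86_TRAP_PF_RW | X86_TRAP_PF_P), a read from an unmapped GPA
   yields 0, and an instruction fetch from an unmapped GPA yields X86_TRAP_PF_ID. */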
8805
8806 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8807 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%#RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
8808
8809 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8810
8811 /*
8812 * Handle the pagefault trap for the nested shadow table.
8813 */
8814 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
8815 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
8816 TRPMResetTrap(pVCpu);
8817
8818 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
8819 if ( rcStrict == VINF_SUCCESS
8820 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8821 || rcStrict == VERR_PAGE_NOT_PRESENT)
8822 {
8823 /* Successfully synced our nested page tables. */
8824 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
8825 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
8826 return VINF_SUCCESS;
8827 }
8828#else
8829 PVM pVM = pVCpu->CTX_SUFF(pVM);
8830 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
8831 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8832 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8833 vmxHCImportGuestRip(pVCpu);
8834 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
8835
8836 /*
8837 * Ask PGM for information about the given GCPhys. We need to check if we're
8838 * out of sync first.
8839 */
8840 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE), false, false };
8841 PGMPHYSNEMPAGEINFO Info;
8842 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
8843 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
8844 if (RT_SUCCESS(rc))
8845 {
8846 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8847 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
8848 {
8849 if (State.fCanResume)
8850 {
8851 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
8852 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8853 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8854 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8855 State.fDidSomething ? "" : " no-change"));
8856 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
8857 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8858 return VINF_SUCCESS;
8859 }
8860 }
8861
8862 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
8863 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8864 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8865 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8866 State.fDidSomething ? "" : " no-change"));
8867 }
8868 else
8869 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
8870 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8871 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
8872
8873 /*
8874 * Emulate the memory access, either access handler or special memory.
8875 */
8876 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
8877 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8878 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
8879 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
8880 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8881
8882 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8883 AssertRCReturn(rc, rc);
8884
8885 VBOXSTRICTRC rcStrict;
8886 if (!pExitRec)
8887 rcStrict = IEMExecOne(pVCpu);
8888 else
8889 {
8890 /* Frequent access or probing. */
8891 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8892 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8893 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8894 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8895 }
8896
8897 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8898#endif
8899
8900 Log4Func(("EPT return to ring-3 rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8901 return rcStrict;
8902}
8903
8904
8905#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8906/**
8907 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
8908 */
8909HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8910{
8911 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8912
8913 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8914 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8915 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8916 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8917 | CPUMCTX_EXTRN_HWVIRT
8918 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8919 AssertRCReturn(rc, rc);
8920
8921 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8922
8923 VMXVEXITINFO ExitInfo;
8924 RT_ZERO(ExitInfo);
8925 ExitInfo.uReason = pVmxTransient->uExitReason;
8926 ExitInfo.u64Qual = pVmxTransient->uExitQual;
8927 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
8928 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
8929 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
8930
8931 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
8932 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8933 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
8934 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8935 {
8936 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8937 rcStrict = VINF_SUCCESS;
8938 }
8939 return rcStrict;
8940}
8941
8942
8943/**
8944 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
8945 */
8946HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8947{
8948 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8949
8950 /* Import the entire VMCS state for now as we would be switching VMCS on a successful VMLAUNCH;
8951 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
8952 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8953 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8954 AssertRCReturn(rc, rc);
8955
8956 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8957
8958 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
8959 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
8960 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
8961 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8962 {
8963 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8964 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
8965 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
8966 }
8967 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8968 return rcStrict;
8969}
8970
8971
8972/**
8973 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
8974 */
8975HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8976{
8977 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8978
8979 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8980 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8981 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8982 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8983 | CPUMCTX_EXTRN_HWVIRT
8984 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8985 AssertRCReturn(rc, rc);
8986
8987 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8988
8989 VMXVEXITINFO ExitInfo;
8990 RT_ZERO(ExitInfo);
8991 ExitInfo.uReason = pVmxTransient->uExitReason;
8992 ExitInfo.u64Qual = pVmxTransient->uExitQual;
8993 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
8994 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
8995 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
8996
8997 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
8998 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8999 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9000 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9001 {
9002 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9003 rcStrict = VINF_SUCCESS;
9004 }
9005 return rcStrict;
9006}
9007
9008
9009/**
9010 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9011 */
9012HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9013{
9014 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9015
9016 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9017 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9018 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9019 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9020 | CPUMCTX_EXTRN_HWVIRT
9021 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9022 AssertRCReturn(rc, rc);
9023
9024 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9025
9026 VMXVEXITINFO ExitInfo;
9027 RT_ZERO(ExitInfo);
9028 ExitInfo.uReason = pVmxTransient->uExitReason;
9029 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9030 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9031 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9032 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9033
9034 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9035 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9036 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9037 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9038 {
9039 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9040 rcStrict = VINF_SUCCESS;
9041 }
9042 return rcStrict;
9043}
9044
9045
9046/**
9047 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9048 */
9049HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9050{
9051 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9052
9053 /*
9054 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9055 * thus might not need to import the shadow VMCS state. However, it is safer just in case
9056 * code elsewhere dares look at unsynced VMCS fields.
9057 */
9058 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9059 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9060 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9061 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9062 | CPUMCTX_EXTRN_HWVIRT
9063 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9064 AssertRCReturn(rc, rc);
9065
9066 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9067
9068 VMXVEXITINFO ExitInfo;
9069 RT_ZERO(ExitInfo);
9070 ExitInfo.uReason = pVmxTransient->uExitReason;
9071 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9072 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9073 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9074 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9075 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9076
9077 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9078 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9079 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9080 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9081 {
9082 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9083 rcStrict = VINF_SUCCESS;
9084 }
9085 return rcStrict;
9086}
9087
9088
9089/**
9090 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9091 */
9092HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9093{
9094 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9095
9096 /* Import the entire VMCS state for now as we would be switching VMCS on a successful VMRESUME;
9097 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9098 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9099 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9100 AssertRCReturn(rc, rc);
9101
9102 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9103
9104 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9105 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9106 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9107 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9108 {
9109 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9110 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9111 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9112 }
9113 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9114 return rcStrict;
9115}
9116
9117
9118/**
9119 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9120 */
9121HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9122{
9123 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9124
9125 /*
9126 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, since our HM hook
9127 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and it
9128 * flags re-loading the entire shadow VMCS, we should save the entire shadow VMCS here.
9129 */
9130 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9131 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9132 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9133 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9134 | CPUMCTX_EXTRN_HWVIRT
9135 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9136 AssertRCReturn(rc, rc);
9137
9138 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9139
9140 VMXVEXITINFO ExitInfo;
9141 RT_ZERO(ExitInfo);
9142 ExitInfo.uReason = pVmxTransient->uExitReason;
9143 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9144 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9145 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9146 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9147 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9148
9149 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9150 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9151 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9152 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9153 {
9154 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9155 rcStrict = VINF_SUCCESS;
9156 }
9157 return rcStrict;
9158}
9159
9160
9161/**
9162 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9163 */
9164HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9165{
9166 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9167
9168 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9169 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR4
9170 | CPUMCTX_EXTRN_HWVIRT
9171 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
9172 AssertRCReturn(rc, rc);
9173
9174 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9175
9176 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9177 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9178 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9179 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9180 {
9181 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9182 rcStrict = VINF_SUCCESS;
9183 }
9184 return rcStrict;
9185}
9186
9187
9188/**
9189 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9190 */
9191HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9192{
9193 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9194
9195 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9196 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9197 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9198 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9199 | CPUMCTX_EXTRN_HWVIRT
9200 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9201 AssertRCReturn(rc, rc);
9202
9203 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9204
9205 VMXVEXITINFO ExitInfo;
9206 RT_ZERO(ExitInfo);
9207 ExitInfo.uReason = pVmxTransient->uExitReason;
9208 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9209 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9210 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9211 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9212
9213 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9214 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9215 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9216 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9217 {
9218 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9219 rcStrict = VINF_SUCCESS;
9220 }
9221 return rcStrict;
9222}
9223
9224
9225/**
9226 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9227 */
9228HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9229{
9230 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9231
9232 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9233 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9234 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9235 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9236 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9237 AssertRCReturn(rc, rc);
9238
9239 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9240
9241 VMXVEXITINFO ExitInfo;
9242 RT_ZERO(ExitInfo);
9243 ExitInfo.uReason = pVmxTransient->uExitReason;
9244 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9245 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9246 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9247 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9248
9249 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9250 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9251 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9252 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9253 {
9254 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9255 rcStrict = VINF_SUCCESS;
9256 }
9257 return rcStrict;
9258}
9259
9260
9261# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9262/**
9263 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9264 */
9265HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9266{
9267 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9268
9269 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9270 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9271 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9272 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9273 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9274 AssertRCReturn(rc, rc);
9275
9276 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9277
9278 VMXVEXITINFO ExitInfo;
9279 RT_ZERO(ExitInfo);
9280 ExitInfo.uReason = pVmxTransient->uExitReason;
9281 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9282 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9283 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9284 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9285
9286 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9287 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9288 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9289 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9290 {
9291 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9292 rcStrict = VINF_SUCCESS;
9293 }
9294 return rcStrict;
9295}
9296# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9297#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9298/** @} */
9299
9300
9301#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9302/** @name Nested-guest VM-exit handlers.
9303 * @{
9304 */
9305/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9306/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9307/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9308
9309/**
9310 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9311 * Conditional VM-exit.
9312 */
9313HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9314{
9315 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9316
9317 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
9318
9319 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9320 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9321 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9322
9323 switch (uExitIntType)
9324 {
9325#ifndef IN_NEM_DARWIN
9326 /*
9327 * Physical NMIs:
9328 * We shouldn't direct host physical NMIs to the nested-guest. Dispatch them to the host.
9329 */
9330 case VMX_EXIT_INT_INFO_TYPE_NMI:
9331 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9332#endif
9333
9334 /*
9335 * Hardware exceptions,
9336 * Software exceptions,
9337 * Privileged software exceptions:
9338 * Figure out if the exception must be delivered to the guest or the nested-guest.
9339 */
9340 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9341 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9342 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9343 {
9344 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
9345 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9346 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9347 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9348
9349 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9350 bool const fIntercept = CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo),
9351 pVmxTransient->uExitIntErrorCode);
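/* Broadly, the check above consults the nested-guest exception bitmap and, for #PF,
   the page-fault error-code mask/match VMCS fields; see the CPUM helper for the
   exact rules. */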
9352 if (fIntercept)
9353 {
9354 /* Exit qualification is required for debug and page-fault exceptions. */
9355 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9356
9357 /*
9358 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9359 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9360 * length. However, if delivery of a software interrupt, software exception or privileged
9361 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
9362 */
9363 VMXVEXITINFO ExitInfo;
9364 RT_ZERO(ExitInfo);
9365 ExitInfo.uReason = pVmxTransient->uExitReason;
9366 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9367 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9368
9369 VMXVEXITEVENTINFO ExitEventInfo;
9370 RT_ZERO(ExitEventInfo);
9371 ExitEventInfo.uExitIntInfo = pVmxTransient->uExitIntInfo;
9372 ExitEventInfo.uExitIntErrCode = pVmxTransient->uExitIntErrorCode;
9373 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9374 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9375
9376#ifdef DEBUG_ramshankar
9377 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9378 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n", pVmxTransient->uExitIntInfo,
9379 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
9380 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9381 {
9382 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n", pVmxTransient->uIdtVectoringInfo,
9383 pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
9384 }
9385#endif
9386 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
9387 }
9388
9389 /* Nested paging is currently a requirement; otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
9390 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9391 return vmxHCExitXcpt(pVCpu, pVmxTransient);
9392 }
9393
9394 /*
9395 * Software interrupts:
9396 * VM-exits cannot be caused by software interrupts.
9397 *
9398 * External interrupts:
9399 * This should only happen when "acknowledge external interrupts on VM-exit"
9400 * control is set. However, we never set this when executing a guest or
9401 * nested-guest. For nested-guests it is emulated while injecting interrupts into
9402 * the guest.
9403 */
9404 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
9405 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
9406 default:
9407 {
9408 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
9409 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
9410 }
9411 }
9412}
9413
9414
9415/**
9416 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
9417 * Unconditional VM-exit.
9418 */
9419HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9420{
9421 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9422 return IEMExecVmxVmexitTripleFault(pVCpu);
9423}
9424
9425
9426/**
9427 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
9428 */
9429HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9430{
9431 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9432
9433 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
9434 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9435 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9436}
9437
9438
9439/**
9440 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
9441 */
9442HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9443{
9444 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9445
9446 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
9447 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9448 return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
9449}
9450
9451
9452/**
9453 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
9454 * Unconditional VM-exit.
9455 */
9456HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9457{
9458 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9459
9460 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9461 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9462 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9463 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9464
9465 VMXVEXITINFO ExitInfo;
9466 RT_ZERO(ExitInfo);
9467 ExitInfo.uReason = pVmxTransient->uExitReason;
9468 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9469 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9470
9471 VMXVEXITEVENTINFO ExitEventInfo;
9472 RT_ZERO(ExitEventInfo);
9473 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9474 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9475 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
9476}
9477
9478
9479/**
9480 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
9481 */
9482HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9483{
9484 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9485
9486 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
9487 {
9488 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9489 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9490 }
9491 return vmxHCExitHlt(pVCpu, pVmxTransient);
9492}
9493
9494
9495/**
9496 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
9497 */
9498HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9499{
9500 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9501
9502 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
9503 {
9504 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9505 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9506
9507 VMXVEXITINFO ExitInfo;
9508 RT_ZERO(ExitInfo);
9509 ExitInfo.uReason = pVmxTransient->uExitReason;
9510 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9511 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9512 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9513 }
9514 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
9515}
9516
9517
9518/**
9519 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
9520 */
9521HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9522{
9523 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9524
9525 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
9526 {
9527 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9528 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9529 }
9530 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
9531}
9532
9533
9534/**
9535 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
9536 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
9537 */
9538HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9539{
9540 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9541
9542 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
9543 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
9544
9545 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9546
9547 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
9548 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9549 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9550
9551 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
9552 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
9553 u64VmcsField &= UINT64_C(0xffffffff);
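/* Outside long mode only a 32-bit VMCS-field encoding can be specified, which is
   presumably why the upper half of the register value is masked off here. */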
9554
9555 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
9556 {
9557 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9558 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9559
9560 VMXVEXITINFO ExitInfo;
9561 RT_ZERO(ExitInfo);
9562 ExitInfo.uReason = pVmxTransient->uExitReason;
9563 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9564 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9565 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
9566 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9567 }
9568
9569 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
9570 return vmxHCExitVmread(pVCpu, pVmxTransient);
9571 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
9572}
9573
9574
9575/**
9576 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
9577 */
9578HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9579{
9580 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9581
9582 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
9583 {
9584 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9585 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9586 }
9587
9588 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
9589}
9590
9591
9592/**
9593 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
9594 * Conditional VM-exit.
9595 */
9596HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9597{
9598 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9599
9600 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9601 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9602
9603 VBOXSTRICTRC rcStrict;
9604 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
9605 switch (uAccessType)
9606 {
9607 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
9608 {
9609 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9610 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9611 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9612 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9613
9614 bool fIntercept;
9615 switch (iCrReg)
9616 {
9617 case 0:
9618 case 4:
9619 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
9620 break;
9621
9622 case 3:
9623 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
9624 break;
9625
9626 case 8:
9627 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
9628 break;
9629
9630 default:
9631 fIntercept = false;
9632 break;
9633 }
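/* Roughly: for CR0/CR4 the helpers above intercept writes that would change bits owned
   by the nested hypervisor (guest/host mask) relative to the read shadow, for CR3 the
   CR3-load exiting control and CR3-target values are consulted, and for CR8 it is just
   the CR8-load exiting control. */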
9634 if (fIntercept)
9635 {
9636 VMXVEXITINFO ExitInfo;
9637 RT_ZERO(ExitInfo);
9638 ExitInfo.uReason = pVmxTransient->uExitReason;
9639 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9640 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9641 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9642 }
9643 else
9644 {
9645 int const rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
9646 AssertRCReturn(rc, rc);
9647 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9648 }
9649 break;
9650 }
9651
9652 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
9653 {
9654 /*
9655 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
9656 * CR2 reads do not cause a VM-exit.
9657 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
9658 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
9659 */
9660 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9661 if ( iCrReg == 3
9662 || iCrReg == 8)
9663 {
9664 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
9665 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
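/* The table above maps the CR index to the processor-based control intercepting reads
   of that register; only the CR3 and CR8 slots are populated and the others are never
   looked up thanks to the iCrReg check. */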
9666 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
9667 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
9668 {
9669 VMXVEXITINFO ExitInfo;
9670 RT_ZERO(ExitInfo);
9671 ExitInfo.uReason = pVmxTransient->uExitReason;
9672 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9673 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9674 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9675 }
9676 else
9677 {
9678 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9679 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9680 }
9681 }
9682 else
9683 {
9684 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
9685 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
9686 }
9687 break;
9688 }
9689
9690 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
9691 {
9692 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
9693 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
9694 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
9695 if ( (uGstHostMask & X86_CR0_TS)
9696 && (uReadShadow & X86_CR0_TS))
9697 {
9698 VMXVEXITINFO ExitInfo;
9699 RT_ZERO(ExitInfo);
9700 ExitInfo.uReason = pVmxTransient->uExitReason;
9701 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9702 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9703 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9704 }
9705 else
9706 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
9707 break;
9708 }
9709
9710 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
9711 {
9712 RTGCPTR GCPtrEffDst;
9713 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
9714 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
9715 if (fMemOperand)
9716 {
9717 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9718 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
9719 }
9720 else
9721 GCPtrEffDst = NIL_RTGCPTR;
9722
9723 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
9724 {
9725 VMXVEXITINFO ExitInfo;
9726 RT_ZERO(ExitInfo);
9727 ExitInfo.uReason = pVmxTransient->uExitReason;
9728 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9729 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
9730 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9731 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9732 }
9733 else
9734 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
9735 break;
9736 }
9737
9738 default:
9739 {
9740 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
9741 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
9742 }
9743 }
9744
9745 if (rcStrict == VINF_IEM_RAISED_XCPT)
9746 {
9747 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9748 rcStrict = VINF_SUCCESS;
9749 }
9750 return rcStrict;
9751}
9752
9753
9754/**
9755 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
9756 * Conditional VM-exit.
9757 */
9758HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9759{
9760 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9761
9762 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
9763 {
9764 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9765 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9766
9767 VMXVEXITINFO ExitInfo;
9768 RT_ZERO(ExitInfo);
9769 ExitInfo.uReason = pVmxTransient->uExitReason;
9770 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9771 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9772 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9773 }
9774 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
9775}
9776
9777
9778/**
9779 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
9780 * Conditional VM-exit.
9781 */
9782HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9783{
9784 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9785
9786 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9787
9788 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
9789 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
9790 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
9791
9792 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
9793 uint8_t const cbAccess = s_aIOSizes[uIOSize];
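/* The size-of-access field encodes 0 for byte, 1 for word and 3 for dword accesses;
   value 2 is undefined, hence the AssertReturn above and the zero entry in s_aIOSizes. */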
9794 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
9795 {
9796 /*
9797 * IN/OUT instruction:
9798 * - Provides VM-exit instruction length.
9799 *
9800 * INS/OUTS instruction:
9801 * - Provides VM-exit instruction length.
9802 * - Provides Guest-linear address.
9803 * - Optionally provides VM-exit instruction info (depends on CPU feature).
9804 */
9805 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9806 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9807
9808 /* Make sure we don't use stale/uninitialized VMX-transient info below. */
9809 pVmxTransient->ExitInstrInfo.u = 0;
9810 pVmxTransient->uGuestLinearAddr = 0;
9811
9812 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
9813 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
9814 if (fIOString)
9815 {
9816 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9817 if (fVmxInsOutsInfo)
9818 {
9819 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
9820 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9821 }
9822 }
9823
9824 VMXVEXITINFO ExitInfo;
9825 RT_ZERO(ExitInfo);
9826 ExitInfo.uReason = pVmxTransient->uExitReason;
9827 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9828 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9829 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
9830 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
9831 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9832 }
9833 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
9834}
9835
9836
9837/**
9838 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
9839 */
9840HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9841{
9842 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9843
9844 uint32_t fMsrpm;
9845 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9846 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9847 else
9848 fMsrpm = VMXMSRPM_EXIT_RD;
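/* When the nested hypervisor does not use MSR bitmaps, every RDMSR causes a VM-exit;
   forcing VMXMSRPM_EXIT_RD here emulates exactly that. */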
9849
9850 if (fMsrpm & VMXMSRPM_EXIT_RD)
9851 {
9852 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9853 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9854 }
9855 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
9856}
9857
9858
9859/**
9860 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
9861 */
9862HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9863{
9864 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9865
9866 uint32_t fMsrpm;
9867 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9868 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9869 else
9870 fMsrpm = VMXMSRPM_EXIT_WR;
9871
9872 if (fMsrpm & VMXMSRPM_EXIT_WR)
9873 {
9874 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9875 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9876 }
9877 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
9878}
9879
9880
9881/**
9882 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
9883 */
9884HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9885{
9886 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9887
9888 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
9889 {
9890 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9891 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9892 }
9893 return vmxHCExitMwait(pVCpu, pVmxTransient);
9894}
9895
9896
9897/**
9898 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
9899 * VM-exit.
9900 */
9901HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9902{
9903 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9904
9905 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
9906 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
9907 VMXVEXITINFO ExitInfo;
9908 RT_ZERO(ExitInfo);
9909 ExitInfo.uReason = pVmxTransient->uExitReason;
9910 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
9911 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
9912}
9913
9914
9915/**
9916 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
9917 */
9918HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9919{
9920 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9921
9922 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
9923 {
9924 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9925 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9926 }
9927 return vmxHCExitMonitor(pVCpu, pVmxTransient);
9928}
9929
9930
9931/**
9932 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
9933 */
9934HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9935{
9936 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9937
9938 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
9939 * PAUSE when executing a nested-guest? If it does not, we would not need
9940 * to check for the intercepts here. Just call VM-exit... */
9941
9942 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
9943 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
9944 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
9945 {
9946 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9947 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9948 }
9949 return vmxHCExitPause(pVCpu, pVmxTransient);
9950}
9951
9952
9953/**
9954 * Nested-guest VM-exit handler for when the TPR value is lowered below the
9955 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
9956 */
9957HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9958{
9959 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9960
9961 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
9962 {
9963 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
9964 VMXVEXITINFO ExitInfo;
9965 RT_ZERO(ExitInfo);
9966 ExitInfo.uReason = pVmxTransient->uExitReason;
9967 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
9968 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
9969 }
9970 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
9971}
9972
9973
9974/**
9975 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
9976 * VM-exit.
9977 */
9978HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9979{
9980 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9981
9982 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9983 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9984 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9985 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9986
9987 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
9988
9989 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
9990 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
9991
9992 VMXVEXITINFO ExitInfo;
9993 RT_ZERO(ExitInfo);
9994 ExitInfo.uReason = pVmxTransient->uExitReason;
9995 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9996 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9997
9998 VMXVEXITEVENTINFO ExitEventInfo;
9999 RT_ZERO(ExitEventInfo);
10000 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10001 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10002 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10003}
10004
10005
10006/**
10007 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10008 * Conditional VM-exit.
10009 */
10010HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10011{
10012 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10013
10014 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10015 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10016 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10017}
10018
10019
10020/**
10021 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10022 * Conditional VM-exit.
10023 */
10024HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10025{
10026 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10027
10028 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10029 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10030 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10031}
10032
10033
10034/**
10035 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10036 */
10037HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10038{
10039 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10040
10041 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10042 {
10043 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10044 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10045 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10046 }
10047 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10048}
10049
10050
10051/**
10052 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10053 */
10054HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10055{
10056 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10057
10058 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10059 {
10060 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10061 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10062 }
10063 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10064}
10065
10066
10067/**
10068 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10069 */
10070HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10071{
10072 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10073
10074 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10075 {
10076 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10077 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10078 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10079 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10080
10081 VMXVEXITINFO ExitInfo;
10082 RT_ZERO(ExitInfo);
10083 ExitInfo.uReason = pVmxTransient->uExitReason;
10084 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10085 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10086 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
10087 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10088 }
10089 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10090}
10091
10092
10093/**
10094 * Nested-guest VM-exit handler for an invalid guest state
10095 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10096 */
10097HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10098{
10099 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10100
10101 /*
10102 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10103 * So if it does happen, it indicates a bug, possibly in the hardware-assisted VMX code.
10104 * Handle it as if the outer guest were in an invalid guest state.
10105 *
10106 * When the fast path is implemented, this should be changed to cause the corresponding
10107 * nested-guest VM-exit.
10108 */
10109 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10110}
10111
10112
10113/**
10114 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10115 * and only provide the instruction length.
10116 *
10117 * Unconditional VM-exit.
10118 */
10119HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10120{
10121 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10122
10123#ifdef VBOX_STRICT
10124 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10125 switch (pVmxTransient->uExitReason)
10126 {
10127 case VMX_EXIT_ENCLS:
10128 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10129 break;
10130
10131 case VMX_EXIT_VMFUNC:
10132 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10133 break;
10134 }
10135#endif
10136
10137 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10138 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10139}
10140
10141
10142/**
10143 * Nested-guest VM-exit handler for instructions that provide instruction length as
10144 * well as more information.
10145 *
10146 * Unconditional VM-exit.
10147 */
10148HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10149{
10150 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10151
10152#ifdef VBOX_STRICT
10153 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10154 switch (pVmxTransient->uExitReason)
10155 {
10156 case VMX_EXIT_GDTR_IDTR_ACCESS:
10157 case VMX_EXIT_LDTR_TR_ACCESS:
10158 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10159 break;
10160
10161 case VMX_EXIT_RDRAND:
10162 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10163 break;
10164
10165 case VMX_EXIT_RDSEED:
10166 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10167 break;
10168
10169 case VMX_EXIT_XSAVES:
10170 case VMX_EXIT_XRSTORS:
10171 /** @todo NSTVMX: Verify XSS-bitmap. */
10172 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10173 break;
10174
10175 case VMX_EXIT_UMWAIT:
10176 case VMX_EXIT_TPAUSE:
10177 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10178 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10179 break;
10180
10181 case VMX_EXIT_LOADIWKEY:
10182 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10183 break;
10184 }
10185#endif
10186
10187 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10188 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10189 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10190
10191 VMXVEXITINFO ExitInfo;
10192 RT_ZERO(ExitInfo);
10193 ExitInfo.uReason = pVmxTransient->uExitReason;
10194 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10195 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10196 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
10197 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10198}
10199
10200
10201# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10202/**
10203 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10204 * Conditional VM-exit.
10205 */
10206HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10207{
10208 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10209 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10210
10211 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10212 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10213 {
10214 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
10215 AssertRCReturn(rc, rc);
10216
10217 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10218 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10219 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
10220
10221 RTGCPHYS const GCPhysNested = pVmxTransient->uGuestPhysicalAddr;
10222 uint64_t const uExitQual = pVmxTransient->uExitQual;
10223
10224 RTGCPTR GCPtrNested;
10225 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10226 if (fIsLinearAddrValid)
10227 {
10228 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
10229 GCPtrNested = pVmxTransient->uGuestLinearAddr;
10230 }
10231 else
10232 GCPtrNested = 0;
10233
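 /* Descriptive note (added for clarity): the bit twiddling below translates the
    EPT-violation exit qualification into x86 #PF error-code bits so the common PGM
    nested-paging handler can reuse its page-fault logic: instruction fetch -> ID,
    write access -> RW, and any EPT read/write/execute permission bit present in the
    qualification -> P (the translation existed but lacked the required permission). */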
10234 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10235 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10236 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10237 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10238 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10239
10240 PGMPTWALK Walk;
10241 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10242 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, CPUMCTX2CORE(pCtx), GCPhysNested,
10243 fIsLinearAddrValid, GCPtrNested, &Walk);
10244 if (RT_SUCCESS(rcStrict))
10245 {
10246 if (rcStrict == VINF_SUCCESS)
10247 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
10248 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10249 {
10250 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10251 rcStrict = VINF_SUCCESS;
10252 }
10253 return rcStrict;
10254 }
10255
10256 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10257 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10258
10259 VMXVEXITEVENTINFO ExitEventInfo;
10260 RT_ZERO(ExitEventInfo);
10261 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10262 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10263
10264 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10265 {
10266 VMXVEXITINFO ExitInfo;
10267 RT_ZERO(ExitInfo);
10268 ExitInfo.uReason = VMX_EXIT_EPT_VIOLATION;
10269 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10270 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10271 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
10272 ExitInfo.u64GuestPhysAddr = pVmxTransient->uGuestPhysicalAddr;
10273 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10274 }
10275
10276 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10277 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10278 }
10279
10280 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10281}
10282
10283
10284/**
10285 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10286 * Conditional VM-exit.
10287 */
10288HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10289{
10290 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10291 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10292
10293 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10294 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10295 {
10296 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
10297 AssertRCReturn(rc, rc);
10298
10299 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
10300
10301 PGMPTWALK Walk;
10302 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10303 RTGCPHYS const GCPhysNested = pVmxTransient->uGuestPhysicalAddr;
10304 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, CPUMCTX2CORE(pCtx),
10305 GCPhysNested, false /* fIsLinearAddrValid */,
10306 0 /* GCPtrNested*/, &Walk);
10307 if (RT_SUCCESS(rcStrict))
10308 return VINF_EM_RAW_EMULATE_INSTR;
10309
10310 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10311 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10312
10313 VMXVEXITEVENTINFO ExitEventInfo;
10314 RT_ZERO(ExitEventInfo);
10315 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10316 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10317
10318 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10319 }
10320
10321 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
10322}
10323# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10324
10325/** @} */
10326#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10327
10328
10329/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10330 * probes.
10331 *
10332 * The following few functions and associated structure contain the bloat
10333 * necessary for providing detailed debug events and dtrace probes as well as
10334 * reliable host side single stepping. This works on the principle of
10335 * "subclassing" the normal execution loop and workers. We replace the loop
10336 * method completely and override selected helpers to add necessary adjustments
10337 * to their core operation.
10338 *
10339 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10340 * any performance for debug and analysis features.
10341 *
10342 * @{
10343 */
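/*
 * Illustrative sketch (not part of the original source): how the helpers declared
 * below are expected to compose inside a debug run loop. The real loop in this file
 * does considerably more (VM-entry/exit plumbing, forced-action checks, statistics);
 * the skeleton only shows the init/update/apply/handle/revert ordering and the
 * stepping check against uRipStart/uCsStart. The function name
 * vmxHCRunGuestCodeDebugSketch and the cMaxLoops bound are assumptions for the sketch.
 */
#if 0 /* illustrative sketch only, not compiled */
static VBOXSTRICTRC vmxHCRunGuestCodeDebugSketch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t cMaxLoops)
{
    VMXRUNDBGSTATE DbgState;
    vmxHCRunDebugStateInit(pVCpu, pVmxTransient, &DbgState);           /* Record starting CS:RIP and initial VMCS controls. */

    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    for (uint32_t cLoops = 0; cLoops < cMaxLoops; cLoops++)
    {
        /* Refresh the wanted VM-exits/controls from the current DBGF and dtrace settings. */
        vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState);
        /* Push the extra controls into the VMCS right before running the guest. */
        vmxHCPreRunGuestDebugStateApply(pVCpu, pVmxTransient, &DbgState);

        /* ... run the guest and read back pVmxTransient->uExitReason here ... */

        /* Filter the exit: fire DBGF/dtrace events, detect stepping, or fall back to
           the normal exit handlers. */
        rcStrict = vmxHCRunDebugHandleExit(pVCpu, pVmxTransient, &DbgState);
        if (rcStrict != VINF_SUCCESS)
            break;

        /* Stop once we have moved past the starting CS:RIP when single stepping. */
        if (   pVCpu->cpum.GstCtx.rip    != DbgState.uRipStart
            || pVCpu->cpum.GstCtx.cs.Sel != DbgState.uCsStart)
        {
            rcStrict = VINF_EM_DBG_STEPPED;
            break;
        }
    }

    /* Undo the VMCS modifications so the normal run loop starts from a clean slate. */
    return vmxHCRunDebugStateRevert(pVCpu, pVmxTransient, &DbgState, rcStrict);
}
#endif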
10344
10345/**
10346 * Transient per-VCPU debug state of the VMCS and related info that we
10347 * save/restore in the debug run loop.
10348 */
10349typedef struct VMXRUNDBGSTATE
10350{
10351 /** The RIP we started executing at. This is for detecting that we stepped. */
10352 uint64_t uRipStart;
10353 /** The CS we started executing with. */
10354 uint16_t uCsStart;
10355
10356 /** Whether we've actually modified the 1st execution control field. */
10357 bool fModifiedProcCtls : 1;
10358 /** Whether we've actually modified the 2nd execution control field. */
10359 bool fModifiedProcCtls2 : 1;
10360 /** Whether we've actually modified the exception bitmap. */
10361 bool fModifiedXcptBitmap : 1;
10362
10363 /** We desire the CR0 mask to be cleared. */
10364 bool fClearCr0Mask : 1;
10365 /** We desire the CR4 mask to be cleared. */
10366 bool fClearCr4Mask : 1;
10367 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10368 uint32_t fCpe1Extra;
10369 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10370 uint32_t fCpe1Unwanted;
10371 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10372 uint32_t fCpe2Extra;
10373 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10374 uint32_t bmXcptExtra;
10375 /** The sequence number of the Dtrace provider settings the state was
10376 * configured against. */
10377 uint32_t uDtraceSettingsSeqNo;
10378 /** VM-exits to check (one bit per VM-exit). */
10379 uint32_t bmExitsToCheck[3];
10380
10381 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10382 uint32_t fProcCtlsInitial;
10383 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10384 uint32_t fProcCtls2Initial;
10385 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10386 uint32_t bmXcptInitial;
10387} VMXRUNDBGSTATE;
10388AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
10389typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
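/*
 * Note on bmExitsToCheck (illustrative, not part of the original source): the array
 * holds one bit per VM-exit reason, which is what the AssertCompileMemberSize above
 * verifies ((VMX_EXIT_MAX + 1) bits rounded up to whole 32-bit words). A VM-exit is
 * requested for checking with ASMBitSet() and, by assumption in this sketch, later
 * tested with ASMBitTest() in the debug run loop before vmxHCHandleExitDtraceEvents
 * is invoked.
 */
#if 0 /* usage sketch only, not compiled */
    VMXRUNDBGSTATE DbgState;
    RT_ZERO(DbgState);
    ASMBitSet(DbgState.bmExitsToCheck, VMX_EXIT_CPUID);      /* Request that CPUID exits be checked. */
    /* ... after a VM-exit, with pVmxTransient->uExitReason in hand ... */
    bool const fCheckThisExit = ASMBitTest(DbgState.bmExitsToCheck, pVmxTransient->uExitReason);
#endif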
10390
10391
10392/**
10393 * Initializes the VMXRUNDBGSTATE structure.
10394 *
10395 * @param pVCpu The cross context virtual CPU structure of the
10396 * calling EMT.
10397 * @param pVmxTransient The VMX-transient structure.
10398 * @param pDbgState The debug state to initialize.
10399 */
10400static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10401{
10402 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
10403 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
10404
10405 pDbgState->fModifiedProcCtls = false;
10406 pDbgState->fModifiedProcCtls2 = false;
10407 pDbgState->fModifiedXcptBitmap = false;
10408 pDbgState->fClearCr0Mask = false;
10409 pDbgState->fClearCr4Mask = false;
10410 pDbgState->fCpe1Extra = 0;
10411 pDbgState->fCpe1Unwanted = 0;
10412 pDbgState->fCpe2Extra = 0;
10413 pDbgState->bmXcptExtra = 0;
10414 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
10415 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
10416 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
10417}
10418
10419
10420/**
10421 * Updates the VMCS fields with changes requested by @a pDbgState.
10422 *
10423 * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
10424 * immediately before executing guest code, i.e. when interrupts are disabled.
10425 * We don't check status codes here as we cannot easily assert or return in the
10426 * latter case.
10427 *
10428 * @param pVCpu The cross context virtual CPU structure.
10429 * @param pVmxTransient The VMX-transient structure.
10430 * @param pDbgState The debug state.
10431 */
10432static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10433{
10434 /*
10435 * Ensure desired flags in VMCS control fields are set.
10436 * (Ignoring write failure here, as we're committed and it's just debug extras.)
10437 *
10438 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
10439 * there should be no stale data in pCtx at this point.
10440 */
10441 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10442 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
10443 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
10444 {
10445 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
10446 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
10447 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
10448 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
10449 pDbgState->fModifiedProcCtls = true;
10450 }
10451
10452 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
10453 {
10454 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
10455 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
10456 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
10457 pDbgState->fModifiedProcCtls2 = true;
10458 }
10459
10460 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
10461 {
10462 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
10463 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
10464 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
10465 pDbgState->fModifiedXcptBitmap = true;
10466 }
10467
10468 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
10469 {
10470 pVmcsInfo->u64Cr0Mask = 0;
10471 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
10472 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
10473 }
10474
10475 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
10476 {
10477 pVmcsInfo->u64Cr4Mask = 0;
10478 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
10479 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
10480 }
10481
10482 NOREF(pVCpu);
10483}
10484
10485
10486/**
10487 * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
10488 * re-entry next time around.
10489 *
10490 * @returns Strict VBox status code (i.e. informational status codes too).
10491 * @param pVCpu The cross context virtual CPU structure.
10492 * @param pVmxTransient The VMX-transient structure.
10493 * @param pDbgState The debug state.
10494 * @param rcStrict The return code from executing the guest using single
10495 * stepping.
10496 */
10497static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
10498 VBOXSTRICTRC rcStrict)
10499{
10500 /*
10501 * Restore VM-execution control settings as we may not re-enter this function the
10502 * next time around.
10503 */
10504 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10505
10506 /* We reload the initial value and trigger what recalculations we can the next
10507 time around. From the looks of things, that's all that's required at the moment. */
10508 if (pDbgState->fModifiedProcCtls)
10509 {
10510 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
10511 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
10512 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
10513 AssertRC(rc2);
10514 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
10515 }
10516
10517 /* We're currently the only ones messing with this one, so just restore the
10518 cached value and reload the field. */
10519 if ( pDbgState->fModifiedProcCtls2
10520 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
10521 {
10522 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
10523 AssertRC(rc2);
10524 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
10525 }
10526
10527 /* If we've modified the exception bitmap, we restore it and trigger
10528 reloading and partial recalculation the next time around. */
10529 if (pDbgState->fModifiedXcptBitmap)
10530 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
10531
10532 return rcStrict;
10533}
10534
10535
10536/**
10537 * Configures VM-exit controls for current DBGF and DTrace settings.
10538 *
10539 * This updates @a pDbgState and the VMCS execution control fields to reflect
10540 * the necessary VM-exits demanded by DBGF and DTrace.
10541 *
10542 * @param pVCpu The cross context virtual CPU structure.
10543 * @param pVmxTransient The VMX-transient structure. May update
10544 * fUpdatedTscOffsettingAndPreemptTimer.
10545 * @param pDbgState The debug state.
10546 */
10547static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10548{
10549#ifndef IN_NEM_DARWIN
10550 /*
10551 * Take down the dtrace serial number so we can spot changes.
10552 */
10553 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
10554 ASMCompilerBarrier();
10555#endif
10556
10557 /*
10558 * We'll rebuild most of the middle block of data members (holding the
10559 * current settings) as we go along here, so start by clearing it all.
10560 */
10561 pDbgState->bmXcptExtra = 0;
10562 pDbgState->fCpe1Extra = 0;
10563 pDbgState->fCpe1Unwanted = 0;
10564 pDbgState->fCpe2Extra = 0;
10565 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
10566 pDbgState->bmExitsToCheck[i] = 0;
10567
10568 /*
10569 * Software interrupts (INT XXh) - no idea how to trigger these...
10570 */
10571 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10572 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
10573 || VBOXVMM_INT_SOFTWARE_ENABLED())
10574 {
10575 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
10576 }
10577
10578 /*
10579 * INT3 breakpoints - triggered by #BP exceptions.
10580 */
10581 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
10582 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
10583
10584 /*
10585 * Exception bitmap and XCPT events+probes.
10586 */
10587 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
10588 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
10589 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
10590
10591 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
10592 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
10593 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
10594 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
10595 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
10596 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
10597 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
10598 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
10599 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
10600 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
10601 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
10602 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
10603 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
10604 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
10605 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
10606 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
10607 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
10608 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
10609
10610 if (pDbgState->bmXcptExtra)
10611 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
10612
10613 /*
10614 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
10615 *
10616 * Note! This is the reverse of what vmxHCHandleExitDtraceEvents does.
10617 * So, when adding/changing/removing please don't forget to update it.
10618 *
10619 * Some of the macros pick up local variables to save horizontal space
10620 * (being able to see it all laid out in a table is the lesser evil here).
10621 */
10622#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
10623 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
10624 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
10625#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
10626 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10627 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10628 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10629 } else do { } while (0)
10630#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
10631 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10632 { \
10633 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
10634 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10635 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10636 } else do { } while (0)
10637#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
10638 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10639 { \
10640 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
10641 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10642 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10643 } else do { } while (0)
10644#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
10645 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10646 { \
10647 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
10648 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10649 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10650 } else do { } while (0)
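 /* For reference (derived mechanically from the macros above): a table entry like
    SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT)
    expands to roughly:

        if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INSTR_MWAIT)
            || VBOXVMM_INSTR_MWAIT_ENABLED())
        {
            pDbgState->fCpe1Extra |= VMX_PROC_CTLS_MWAIT_EXIT;
            ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MWAIT);
        }

    i.e. if either the DBGF event or the dtrace probe is armed, force the matching
    VM-exit via the primary processor-based controls and flag that exit for checking. */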
10651
10652 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
10653 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
10654 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
10655 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
10656 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
10657
10658 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
10659 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
10660 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
10661 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
10662 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
10663 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
10664 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
10665 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
10666 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
10667 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
10668 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
10669 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
10670 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
10671 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
10672 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
10673 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
10674 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
10675 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
10676 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
10677 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
10678 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
10679 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
10680 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
10681 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
10682 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
10683 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
10684 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
10685 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
10686 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
10687 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
10688 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
10689 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
10690 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
10691 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
10692 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
10693 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
10694
10695 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
10696 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
10697 {
10698 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4
10699 | CPUMCTX_EXTRN_APIC_TPR);
10700 AssertRC(rc);
10701
10702#if 0 /** @todo fix me */
10703 pDbgState->fClearCr0Mask = true;
10704 pDbgState->fClearCr4Mask = true;
10705#endif
10706 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
10707 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
10708 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
10709 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
10710 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
10711 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
10712 require clearing here and in the loop if we start using it. */
10713 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
10714 }
10715 else
10716 {
10717 if (pDbgState->fClearCr0Mask)
10718 {
10719 pDbgState->fClearCr0Mask = false;
10720 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
10721 }
10722 if (pDbgState->fClearCr4Mask)
10723 {
10724 pDbgState->fClearCr4Mask = false;
10725 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
10726 }
10727 }
10728 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
10729 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
10730
10731 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
10732 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
10733 {
10734 /** @todo later, need to fix handler as it assumes this won't usually happen. */
10735 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
10736 }
10737 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
10738 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
10739
10740 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
10741 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
10742 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
10743 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
10744 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
10745 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
10746 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
10747 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
10748#if 0 /** @todo too slow, fix handler. */
10749 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
10750#endif
10751 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
10752
10753 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
10754 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
10755 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
10756 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
10757 {
10758 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
10759 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
10760 }
10761 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10762 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10763 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10764 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10765
10766 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
10767 || IS_EITHER_ENABLED(pVM, INSTR_STR)
10768 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
10769 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
10770 {
10771 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
10772 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
10773 }
10774 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
10775 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
10776 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
10777 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
10778
10779 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
10780 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
10781 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
10782 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
10783 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
10784 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
10785 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
10786 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
10787 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
10788 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
10789 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
10790 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
10791 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
10792 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
10793 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
10794 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
10795 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
10796 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
10797 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
10798 SET_ONLY_XBM_IF_EITHER_EN(EXIT_XSAVES, VMX_EXIT_XSAVES);
10799 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
10800 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
10801
10802#undef IS_EITHER_ENABLED
10803#undef SET_ONLY_XBM_IF_EITHER_EN
10804#undef SET_CPE1_XBM_IF_EITHER_EN
10805#undef SET_CPEU_XBM_IF_EITHER_EN
10806#undef SET_CPE2_XBM_IF_EITHER_EN
10807
10808 /*
10809 * Sanitize the control stuff.
10810 */
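 /* Note: in the cached VMX capability values used below, allowed1 holds the control
    bits the CPU permits to be 1 while allowed0 holds the bits that must remain 1; so
    we may only add what allowed1 offers and must not try to clear allowed0 bits. */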
10811 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
10812 if (pDbgState->fCpe2Extra)
10813 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
10814 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
10815 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
10816#ifndef IN_NEM_DARWIN
10817 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
10818 {
10819 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
10820 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
10821 }
10822#else
10823 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
10824 {
10825 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
10826 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
10827 }
10828#endif
10829
10830 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
10831 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
10832 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
10833 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
10834}
10835
10836
10837/**
10838 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
10839 * appropriate.
10840 *
10841 * The caller has checked the VM-exit against the
10842 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs
10843 * already, so we don't have to do that either.
10844 *
10845 * @returns Strict VBox status code (i.e. informational status codes too).
10846 * @param pVCpu The cross context virtual CPU structure.
10847 * @param pVmxTransient The VMX-transient structure.
10848 * @param uExitReason The VM-exit reason.
10849 *
10850 * @remarks The name of this function is displayed by dtrace, so keep it short
10851 * and to the point. No longer than 33 chars, please.
10852 */
10853static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
10854{
10855 /*
10856 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
10857 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
10858 *
10859 * Note! This is the reverse operation of what vmxHCPreRunGuestDebugStateUpdate
10860 * does. Must add/change/remove in both places. Same ordering, please.
10861 *
10862 * Added/removed events must also be reflected in the next section
10863 * where we dispatch dtrace events.
10864 */
10865 bool fDtrace1 = false;
10866 bool fDtrace2 = false;
10867 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
10868 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
10869 uint32_t uEventArg = 0;
10870#define SET_EXIT(a_EventSubName) \
10871 do { \
10872 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
10873 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
10874 } while (0)
10875#define SET_BOTH(a_EventSubName) \
10876 do { \
10877 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
10878 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
10879 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
10880 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
10881 } while (0)
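 /* For reference: a case like "case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;" below
    expands to
        enmEvent1 = DBGFEVENT_INSTR_CPUID;
        enmEvent2 = DBGFEVENT_EXIT_CPUID;
        fDtrace1  = VBOXVMM_INSTR_CPUID_ENABLED();
        fDtrace2  = VBOXVMM_EXIT_CPUID_ENABLED();
    selecting both the instruction and the exit flavour of the event/probe pair. */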
10882 switch (uExitReason)
10883 {
10884 case VMX_EXIT_MTF:
10885 return vmxHCExitMtf(pVCpu, pVmxTransient);
10886
10887 case VMX_EXIT_XCPT_OR_NMI:
10888 {
10889 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
10890 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
10891 {
10892 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
10893 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
10894 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
10895 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
10896 {
10897 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
10898 {
10899 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
10900 uEventArg = pVmxTransient->uExitIntErrorCode;
10901 }
10902 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
10903 switch (enmEvent1)
10904 {
10905 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
10906 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
10907 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
10908 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
10909 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
10910 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
10911 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
10912 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
10913 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
10914 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
10915 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
10916 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
10917 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
10918 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
10919 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
10920 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
10921 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
10922 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
10923 default: break;
10924 }
10925 }
10926 else
10927 AssertFailed();
10928 break;
10929
10930 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10931 uEventArg = idxVector;
10932 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
10933 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
10934 break;
10935 }
10936 break;
10937 }
10938
10939 case VMX_EXIT_TRIPLE_FAULT:
10940 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
10941 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
10942 break;
10943 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
10944 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
10945 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
10946 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
10947 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
10948
10949 /* Instruction specific VM-exits: */
10950 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
10951 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
10952 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
10953 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
10954 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
10955 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
10956 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
10957 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
10958 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
10959 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
10960 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
10961 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
10962 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
10963 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
10964 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
10965 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
10966 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
10967 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
10968 case VMX_EXIT_MOV_CRX:
10969 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10970 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
10971 SET_BOTH(CRX_READ);
10972 else
10973 SET_BOTH(CRX_WRITE);
10974 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10975 break;
10976 case VMX_EXIT_MOV_DRX:
10977 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10978 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
10979 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
10980 SET_BOTH(DRX_READ);
10981 else
10982 SET_BOTH(DRX_WRITE);
10983 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
10984 break;
10985 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
10986 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
10987 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
10988 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
10989 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
10990 case VMX_EXIT_GDTR_IDTR_ACCESS:
10991 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10992 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
10993 {
10994 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
10995 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
10996 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
10997 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
10998 }
10999 break;
11000
11001 case VMX_EXIT_LDTR_TR_ACCESS:
11002 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
11003 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11004 {
11005 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11006 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11007 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11008 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11009 }
11010 break;
11011
11012 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11013 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11014 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11015 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11016 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11017 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11018 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11019 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11020 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11021 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11022 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11023
11024 /* Events that aren't relevant at this point. */
11025 case VMX_EXIT_EXT_INT:
11026 case VMX_EXIT_INT_WINDOW:
11027 case VMX_EXIT_NMI_WINDOW:
11028 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11029 case VMX_EXIT_PREEMPT_TIMER:
11030 case VMX_EXIT_IO_INSTR:
11031 break;
11032
11033 /* Errors and unexpected events. */
11034 case VMX_EXIT_INIT_SIGNAL:
11035 case VMX_EXIT_SIPI:
11036 case VMX_EXIT_IO_SMI:
11037 case VMX_EXIT_SMI:
11038 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11039 case VMX_EXIT_ERR_MSR_LOAD:
11040 case VMX_EXIT_ERR_MACHINE_CHECK:
11041 case VMX_EXIT_PML_FULL:
11042 case VMX_EXIT_VIRTUALIZED_EOI:
11043 break;
11044
11045 default:
11046 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11047 break;
11048 }
11049#undef SET_BOTH
11050#undef SET_EXIT
11051
11052 /*
11053 * Dtrace tracepoints go first. We do them all here at once so we don't
11054 * have to duplicate the guest-state saving and related code a few dozen times.
11055 * The downside is that we've got to repeat the switch, though this time
11056 * we use enmEvent since the probes are a subset of what DBGF does.
11057 */
11058 if (fDtrace1 || fDtrace2)
11059 {
11060 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11061 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11062 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11063 switch (enmEvent1)
11064 {
11065 /** @todo consider which extra parameters would be helpful for each probe. */
11066 case DBGFEVENT_END: break;
11067 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11068 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11069 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11070 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11071 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11072 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11073 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11074 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11075 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11076 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11077 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11078 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11079 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11080 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11081 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11082 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11083 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11084 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11085 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11086 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11087 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11088 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11089 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11090 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11091 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11092 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11093 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11094 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11095 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11096 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11097 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11098 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11099 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11100 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11101 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11102 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11103 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11104 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11105 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11106 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11107 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11108 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11109 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11110 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11111 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11112 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11113 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11114 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11115 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11116 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11117 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11118 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11119 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11120 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11121 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11122 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11123 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11124 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11125 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11126 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11127 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11128 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11129 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11130 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11131 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11132 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11133 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11134 }
11135 switch (enmEvent2)
11136 {
11137 /** @todo consider which extra parameters would be helpful for each probe. */
11138 case DBGFEVENT_END: break;
11139 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11140 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11141 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11142 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11143 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11144 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11145 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11146 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11147 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11148 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11149 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11150 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11151 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11152 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11153 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11154 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11155 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11156 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11157 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11158 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11159 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11160 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11161 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11162 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11163 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11164 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11165 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11166 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11167 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11168 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11169 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11170 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11171 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11172 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11173 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11174 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11175 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11176 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11177 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11178 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11179 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11180 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11181 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11182 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11183 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11184 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11185 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11186 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11187 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11188 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11189 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11190 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11191 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11192 }
11193 }
11194
11195 /*
11196 * Fire off the DBGF event, if enabled (our check here is just a quick one,
11197 * the DBGF call will do a full check).
11198 *
11199 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11200 * Note! If we have two events, we prioritize the first, i.e. the instruction
11201 * one, in order to avoid event nesting.
11202 */
11203 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11204 if ( enmEvent1 != DBGFEVENT_END
11205 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11206 {
11207 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11208 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11209 if (rcStrict != VINF_SUCCESS)
11210 return rcStrict;
11211 }
11212 else if ( enmEvent2 != DBGFEVENT_END
11213 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11214 {
11215 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11216 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11217 if (rcStrict != VINF_SUCCESS)
11218 return rcStrict;
11219 }
11220
11221 return VINF_SUCCESS;
11222}
11223
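/*
 * A minimal sketch of the "cheap check here, full check in DBGF" pattern used
 * above when raising events: DBGF_IS_EVENT_ENABLED is only a quick pre-filter,
 * DBGFEventGenericWithArgs does the real policy check.  The helper name and the
 * single-argument shape below are illustrative assumptions, not existing API.
 */
#if 0 /* illustrative sketch, never built */
static VBOXSTRICTRC vmxHCSketchRaiseDbgfEvent(PVMCC pVM, PVMCPUCC pVCpu, DBGFEVENTTYPE enmEvent, uint64_t uArg)
{
    if (   enmEvent != DBGFEVENT_END
        && DBGF_IS_EVENT_ENABLED(pVM, enmEvent))
        return DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent, DBGFEVENTCTX_HM, 1 /*cArgs*/, uArg);
    return VINF_SUCCESS; /* Nothing to report or the event is not enabled. */
}
#endif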
11224
11225/**
11226 * Single-stepping VM-exit filtering.
11227 *
11228 * This preprocesses the VM-exits, deciding whether we have gotten far enough
11229 * to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit handling
11230 * is performed.
11231 *
11232 * @returns Strict VBox status code (i.e. informational status codes too).
11233 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11234 * @param pVmxTransient The VMX-transient structure.
11235 * @param pDbgState The debug state.
11236 */
11237DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11238{
11239 /*
11240 * Expensive (imports the full guest context) generic dtrace VM-exit probe.
11241 */
11242 uint32_t const uExitReason = pVmxTransient->uExitReason;
11243 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11244 { /* more likely */ }
11245 else
11246 {
11247 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11248 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11249 AssertRC(rc);
11250 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11251 }
11252
11253#ifndef IN_NEM_DARWIN
11254 /*
11255 * Check for host NMI, just to get that out of the way.
11256 */
11257 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11258 { /* normally likely */ }
11259 else
11260 {
11261 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
11262 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11263 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11264 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11265 }
11266#endif
11267
11268 /*
11269 * Check for single stepping event if we're stepping.
11270 */
11271 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11272 {
11273 switch (uExitReason)
11274 {
11275 case VMX_EXIT_MTF:
11276 return vmxHCExitMtf(pVCpu, pVmxTransient);
11277
11278 /* Various events: */
11279 case VMX_EXIT_XCPT_OR_NMI:
11280 case VMX_EXIT_EXT_INT:
11281 case VMX_EXIT_TRIPLE_FAULT:
11282 case VMX_EXIT_INT_WINDOW:
11283 case VMX_EXIT_NMI_WINDOW:
11284 case VMX_EXIT_TASK_SWITCH:
11285 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11286 case VMX_EXIT_APIC_ACCESS:
11287 case VMX_EXIT_EPT_VIOLATION:
11288 case VMX_EXIT_EPT_MISCONFIG:
11289 case VMX_EXIT_PREEMPT_TIMER:
11290
11291 /* Instruction specific VM-exits: */
11292 case VMX_EXIT_CPUID:
11293 case VMX_EXIT_GETSEC:
11294 case VMX_EXIT_HLT:
11295 case VMX_EXIT_INVD:
11296 case VMX_EXIT_INVLPG:
11297 case VMX_EXIT_RDPMC:
11298 case VMX_EXIT_RDTSC:
11299 case VMX_EXIT_RSM:
11300 case VMX_EXIT_VMCALL:
11301 case VMX_EXIT_VMCLEAR:
11302 case VMX_EXIT_VMLAUNCH:
11303 case VMX_EXIT_VMPTRLD:
11304 case VMX_EXIT_VMPTRST:
11305 case VMX_EXIT_VMREAD:
11306 case VMX_EXIT_VMRESUME:
11307 case VMX_EXIT_VMWRITE:
11308 case VMX_EXIT_VMXOFF:
11309 case VMX_EXIT_VMXON:
11310 case VMX_EXIT_MOV_CRX:
11311 case VMX_EXIT_MOV_DRX:
11312 case VMX_EXIT_IO_INSTR:
11313 case VMX_EXIT_RDMSR:
11314 case VMX_EXIT_WRMSR:
11315 case VMX_EXIT_MWAIT:
11316 case VMX_EXIT_MONITOR:
11317 case VMX_EXIT_PAUSE:
11318 case VMX_EXIT_GDTR_IDTR_ACCESS:
11319 case VMX_EXIT_LDTR_TR_ACCESS:
11320 case VMX_EXIT_INVEPT:
11321 case VMX_EXIT_RDTSCP:
11322 case VMX_EXIT_INVVPID:
11323 case VMX_EXIT_WBINVD:
11324 case VMX_EXIT_XSETBV:
11325 case VMX_EXIT_RDRAND:
11326 case VMX_EXIT_INVPCID:
11327 case VMX_EXIT_VMFUNC:
11328 case VMX_EXIT_RDSEED:
11329 case VMX_EXIT_XSAVES:
11330 case VMX_EXIT_XRSTORS:
11331 {
11332 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11333 AssertRCReturn(rc, rc);
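                /* Only report a completed step once the guest has moved past the CS:RIP
                   where the single step was started; otherwise fall through to the
                   normal VM-exit handling further below. */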
11334 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11335 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11336 return VINF_EM_DBG_STEPPED;
11337 break;
11338 }
11339
11340 /* Errors and unexpected events: */
11341 case VMX_EXIT_INIT_SIGNAL:
11342 case VMX_EXIT_SIPI:
11343 case VMX_EXIT_IO_SMI:
11344 case VMX_EXIT_SMI:
11345 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11346 case VMX_EXIT_ERR_MSR_LOAD:
11347 case VMX_EXIT_ERR_MACHINE_CHECK:
11348 case VMX_EXIT_PML_FULL:
11349 case VMX_EXIT_VIRTUALIZED_EOI:
11350 case VMX_EXIT_APIC_WRITE: /* Reportedly fault-like, so presumably it must be processed normally. */
11351 break;
11352
11353 default:
11354 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11355 break;
11356 }
11357 }
11358
11359 /*
11360 * Check for debugger event breakpoints and dtrace probes.
11361 */
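    /* bmExitsToCheck holds one enable bit per VM-exit reason; the range check below
       merely guards against exit reasons beyond the end of the bitmap. */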
11362 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11363 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11364 {
11365 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11366 if (rcStrict != VINF_SUCCESS)
11367 return rcStrict;
11368 }
11369
11370 /*
11371 * Normal processing.
11372 */
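    /* With HMVMX_USE_FUNCTION_TABLE the VM-exit reason indexes g_aVMExitHandlers
       directly; otherwise vmxHCHandleExit performs the dispatch. */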
11373#ifdef HMVMX_USE_FUNCTION_TABLE
11374 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11375#else
11376 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11377#endif
11378}
11379
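/*
 * A minimal sketch of how a debug-mode run loop might consume the status
 * returned by vmxHCRunDebugHandleExit: VINF_SUCCESS keeps the inner loop
 * going, while VINF_EM_DBG_STEPPED and any other strict status code is
 * handed back to the caller.  The loop shape and the VmxDbgState/pVmxTransient
 * names are assumptions for illustration only.
 */
#if 0 /* illustrative sketch, never built */
    for (;;)
    {
        /* ... execute the guest and fill pVmxTransient with the VM-exit details ... */
        VBOXSTRICTRC rcStrict = vmxHCRunDebugHandleExit(pVCpu, pVmxTransient, &VmxDbgState);
        if (rcStrict != VINF_SUCCESS)
            break; /* Includes VINF_EM_DBG_STEPPED; the caller decides how to proceed. */
    }
#endif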
11380/** @} */