VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@ 93199

Last change on this file was in revision 93199, checked in by vboxsync, 3 years ago

VMM: Nested VMX: bugref:10092 Added INVEPT VM-exit.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 430.6 KB
1/* $Id: VMXAllTemplate.cpp.h 93199 2022-01-12 12:35:29Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Defined Constants And Macros *
21*********************************************************************************************************************************/
22#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
23# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
24#endif
25
26
27#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
28# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
29#endif
30
31
32/** Use the function table. */
33#define HMVMX_USE_FUNCTION_TABLE
34
35/** Determine which tagged-TLB flush handler to use. */
36#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
37#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
38#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
39#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
40
41/**
42 * Flags to skip redundant reads of some common VMCS fields that are not part of
43 * the guest-CPU or VCPU state but are needed while handling VM-exits.
44 */
45#define HMVMX_READ_IDT_VECTORING_INFO RT_BIT_32(0)
46#define HMVMX_READ_IDT_VECTORING_ERROR_CODE RT_BIT_32(1)
47#define HMVMX_READ_EXIT_QUALIFICATION RT_BIT_32(2)
48#define HMVMX_READ_EXIT_INSTR_LEN RT_BIT_32(3)
49#define HMVMX_READ_EXIT_INTERRUPTION_INFO RT_BIT_32(4)
50#define HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE RT_BIT_32(5)
51#define HMVMX_READ_EXIT_INSTR_INFO RT_BIT_32(6)
52#define HMVMX_READ_GUEST_LINEAR_ADDR RT_BIT_32(7)
53#define HMVMX_READ_GUEST_PHYSICAL_ADDR RT_BIT_32(8)
54#define HMVMX_READ_GUEST_PENDING_DBG_XCPTS RT_BIT_32(9)
55
56/** All the VMCS fields required for processing of exception/NMI VM-exits. */
57#define HMVMX_READ_XCPT_INFO ( HMVMX_READ_EXIT_INTERRUPTION_INFO \
58 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE \
59 | HMVMX_READ_EXIT_INSTR_LEN \
60 | HMVMX_READ_IDT_VECTORING_INFO \
61 | HMVMX_READ_IDT_VECTORING_ERROR_CODE)
62
63/** Assert that all the given fields have been read from the VMCS. */
64#ifdef VBOX_STRICT
65# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
66 do { \
67 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
68 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
69 } while (0)
70#else
71# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
72#endif
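/*
 * A minimal usage sketch of the intended pattern: the HMVMX_READ_XXX flags, the
 * on-demand vmxHCReadXxxVmcs helpers defined further down in this file and
 * HMVMX_ASSERT_READ combine in a VM-exit handler roughly like this:
 *
 * @code
 *     vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
 *     vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
 *     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN);
 *     // pVmxTransient->uExitQual and pVmxTransient->cbExitInstr are now valid; repeated
 *     // calls are no-ops because fVmcsFieldsRead already has the corresponding bits set.
 * @endcode
 */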
73
74/**
75 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
76 * guest using hardware-assisted VMX.
77 *
78 * This excludes state like GPRs (other than RSP), which are always swapped
79 * and restored across the world-switch, and also registers like the EFER
80 * MSR which cannot be modified by the guest without causing a VM-exit.
81 */
82#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
83 | CPUMCTX_EXTRN_RFLAGS \
84 | CPUMCTX_EXTRN_RSP \
85 | CPUMCTX_EXTRN_SREG_MASK \
86 | CPUMCTX_EXTRN_TABLE_MASK \
87 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
88 | CPUMCTX_EXTRN_SYSCALL_MSRS \
89 | CPUMCTX_EXTRN_SYSENTER_MSRS \
90 | CPUMCTX_EXTRN_TSC_AUX \
91 | CPUMCTX_EXTRN_OTHER_MSRS \
92 | CPUMCTX_EXTRN_CR0 \
93 | CPUMCTX_EXTRN_CR3 \
94 | CPUMCTX_EXTRN_CR4 \
95 | CPUMCTX_EXTRN_DR7 \
96 | CPUMCTX_EXTRN_HWVIRT \
97 | CPUMCTX_EXTRN_INHIBIT_INT \
98 | CPUMCTX_EXTRN_INHIBIT_NMI)
99
100/**
101 * Exception bitmap mask for real-mode guests (real-on-v86).
102 *
103 * We need to intercept all exceptions manually except:
104 * - \#AC and \#DB, which are always intercepted anyway to prevent the CPU from
105 * deadlocking due to bugs in Intel CPUs.
106 * - \#PF, which need not be intercepted even in real-mode if we have nested
107 * paging support.
108 */
109#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
110 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
111 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
112 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
113 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
114 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
115 | RT_BIT(X86_XCPT_XF))
116
117/** Maximum VM-instruction error number. */
118#define HMVMX_INSTR_ERROR_MAX 28
119
120/** Profiling macro. */
121#ifdef HM_PROFILE_EXIT_DISPATCH
122# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
123# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
124#else
125# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
126# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
127#endif
128
129#ifndef IN_NEM_DARWIN
130/** Assert that preemption is disabled or covered by thread-context hooks. */
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
132 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
133
134/** Assert that we haven't migrated CPUs when thread-context hooks are not
135 * used. */
136# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
137 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
138 ("Illegal migration! Entered on CPU %u Current %u\n", \
139 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
140#else
141# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
142# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
143#endif
144
145/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
146 * context. */
147#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
148 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
149 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
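/*
 * A minimal usage sketch: code about to touch a guest register first makes sure the
 * corresponding CPUMCTX_EXTRN_XXX bit has been imported and then asserts it, e.g. for
 * CR0 (vmxHCImportGuestState is prototyped below; pVmcsInfo is assumed to be in scope):
 *
 * @code
 *     vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
 *     uint64_t const uGuestCr0 = pVCpu->cpum.GstCtx.cr0;
 * @endcode
 */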
150
151/** Log the VM-exit reason with an easily visible marker to identify it in a
152 * potential sea of logging data. */
153#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
154 do { \
155 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
156 HMGetVmxExitName(a_uExitReason))); \
157 } while (0) \
158
159
160/*********************************************************************************************************************************
161* Structures and Typedefs *
162*********************************************************************************************************************************/
163/**
164 * Memory operand read or write access.
165 */
166typedef enum VMXMEMACCESS
167{
168 VMXMEMACCESS_READ = 0,
169 VMXMEMACCESS_WRITE = 1
170} VMXMEMACCESS;
171
172
173/**
174 * VMX VM-exit handler.
175 *
176 * @returns Strict VBox status code (i.e. informational status codes too).
177 * @param pVCpu The cross context virtual CPU structure.
178 * @param pVmxTransient The VMX-transient structure.
179 */
180#ifndef HMVMX_USE_FUNCTION_TABLE
181typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
182#else
183typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
184/** Pointer to VM-exit handler. */
185typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
186#endif
187
188/**
189 * VMX VM-exit handler, non-strict status code.
190 *
191 * This is generally the same as FNVMXEXITHANDLER; the NSRC bit is just FYI.
192 *
193 * @returns VBox status code, no informational status code returned.
194 * @param pVCpu The cross context virtual CPU structure.
195 * @param pVmxTransient The VMX-transient structure.
196 *
197 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
198 * use of that status code will be replaced with VINF_EM_SOMETHING
199 * later when switching over to IEM.
200 */
201#ifndef HMVMX_USE_FUNCTION_TABLE
202typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203#else
204typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
205#endif
206
207
208/*********************************************************************************************************************************
209* Internal Functions *
210*********************************************************************************************************************************/
211#ifndef HMVMX_USE_FUNCTION_TABLE
212DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
213# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
214# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
215#else
216# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
217# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
218#endif
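/*
 * A short sketch of what these macros shape in practice: with the function table
 * enabled, each VM-exit handler later in this template is defined as a static
 * callback, e.g. (body elided):
 *
 * @code
 *     HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
 *     {
 *         // ... handle the HLT VM-exit and return a strict status code ...
 *     }
 * @endcode
 */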
219#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
220DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
221#endif
222
223static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
224
225/** @name VM-exit handler prototypes.
226 * @{
227 */
228static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
229static FNVMXEXITHANDLER vmxHCExitExtInt;
230static FNVMXEXITHANDLER vmxHCExitTripleFault;
231static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
232static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
233static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
234static FNVMXEXITHANDLER vmxHCExitCpuid;
235static FNVMXEXITHANDLER vmxHCExitGetsec;
236static FNVMXEXITHANDLER vmxHCExitHlt;
237static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
238static FNVMXEXITHANDLER vmxHCExitInvlpg;
239static FNVMXEXITHANDLER vmxHCExitRdpmc;
240static FNVMXEXITHANDLER vmxHCExitVmcall;
241#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
242static FNVMXEXITHANDLER vmxHCExitVmclear;
243static FNVMXEXITHANDLER vmxHCExitVmlaunch;
244static FNVMXEXITHANDLER vmxHCExitVmptrld;
245static FNVMXEXITHANDLER vmxHCExitVmptrst;
246static FNVMXEXITHANDLER vmxHCExitVmread;
247static FNVMXEXITHANDLER vmxHCExitVmresume;
248static FNVMXEXITHANDLER vmxHCExitVmwrite;
249static FNVMXEXITHANDLER vmxHCExitVmxoff;
250static FNVMXEXITHANDLER vmxHCExitVmxon;
251static FNVMXEXITHANDLER vmxHCExitInvvpid;
252# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
253static FNVMXEXITHANDLER vmxHCExitInvept;
254# endif
255#endif
256static FNVMXEXITHANDLER vmxHCExitRdtsc;
257static FNVMXEXITHANDLER vmxHCExitMovCRx;
258static FNVMXEXITHANDLER vmxHCExitMovDRx;
259static FNVMXEXITHANDLER vmxHCExitIoInstr;
260static FNVMXEXITHANDLER vmxHCExitRdmsr;
261static FNVMXEXITHANDLER vmxHCExitWrmsr;
262static FNVMXEXITHANDLER vmxHCExitMwait;
263static FNVMXEXITHANDLER vmxHCExitMtf;
264static FNVMXEXITHANDLER vmxHCExitMonitor;
265static FNVMXEXITHANDLER vmxHCExitPause;
266static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
267static FNVMXEXITHANDLER vmxHCExitApicAccess;
268static FNVMXEXITHANDLER vmxHCExitEptViolation;
269static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
270static FNVMXEXITHANDLER vmxHCExitRdtscp;
271static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
272static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
273static FNVMXEXITHANDLER vmxHCExitXsetbv;
274static FNVMXEXITHANDLER vmxHCExitInvpcid;
275static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
276static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
277static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
278/** @} */
279
280#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
281/** @name Nested-guest VM-exit handler prototypes.
282 * @{
283 */
284static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
285static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
286static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
287static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
288static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
289static FNVMXEXITHANDLER vmxHCExitHltNested;
290static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
291static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
292static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
293static FNVMXEXITHANDLER vmxHCExitRdtscNested;
294static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
295static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
296static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
297static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
298static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
299static FNVMXEXITHANDLER vmxHCExitMwaitNested;
300static FNVMXEXITHANDLER vmxHCExitMtfNested;
301static FNVMXEXITHANDLER vmxHCExitMonitorNested;
302static FNVMXEXITHANDLER vmxHCExitPauseNested;
303static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
304static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
305static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
306static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
307static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
308static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
309static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
310static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
311static FNVMXEXITHANDLER vmxHCExitInstrNested;
312static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
313/** @} */
314#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
315
316
317/*********************************************************************************************************************************
318* Global Variables *
319*********************************************************************************************************************************/
320#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
321/**
322 * Array of all VMCS fields.
323 * Any fields added to the VT-x spec. should be added here.
324 *
325 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
326 * of nested-guests.
327 */
328static const uint32_t g_aVmcsFields[] =
329{
330 /* 16-bit control fields. */
331 VMX_VMCS16_VPID,
332 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
333 VMX_VMCS16_EPTP_INDEX,
334
335 /* 16-bit guest-state fields. */
336 VMX_VMCS16_GUEST_ES_SEL,
337 VMX_VMCS16_GUEST_CS_SEL,
338 VMX_VMCS16_GUEST_SS_SEL,
339 VMX_VMCS16_GUEST_DS_SEL,
340 VMX_VMCS16_GUEST_FS_SEL,
341 VMX_VMCS16_GUEST_GS_SEL,
342 VMX_VMCS16_GUEST_LDTR_SEL,
343 VMX_VMCS16_GUEST_TR_SEL,
344 VMX_VMCS16_GUEST_INTR_STATUS,
345 VMX_VMCS16_GUEST_PML_INDEX,
346
347 /* 16-bit host-state fields. */
348 VMX_VMCS16_HOST_ES_SEL,
349 VMX_VMCS16_HOST_CS_SEL,
350 VMX_VMCS16_HOST_SS_SEL,
351 VMX_VMCS16_HOST_DS_SEL,
352 VMX_VMCS16_HOST_FS_SEL,
353 VMX_VMCS16_HOST_GS_SEL,
354 VMX_VMCS16_HOST_TR_SEL,
355
356 /* 64-bit control fields. */
357 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
358 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
359 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
360 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
361 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
362 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
363 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
364 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
365 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
366 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
367 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
368 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
369 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
370 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
371 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
372 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
373 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
374 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
375 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
376 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
377 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
378 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
379 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
380 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
381 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
382 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
383 VMX_VMCS64_CTRL_EPTP_FULL,
384 VMX_VMCS64_CTRL_EPTP_HIGH,
385 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
386 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
387 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
388 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
389 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
390 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
391 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
392 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
393 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
394 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
395 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
396 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
397 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
398 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
399 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
400 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
401 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
402 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
403 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
404 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
405 VMX_VMCS64_CTRL_SPPTP_FULL,
406 VMX_VMCS64_CTRL_SPPTP_HIGH,
407 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
408 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
409 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
410 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
411 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
412 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
413
414 /* 64-bit read-only data fields. */
415 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
416 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
417
418 /* 64-bit guest-state fields. */
419 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
420 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
421 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
422 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
423 VMX_VMCS64_GUEST_PAT_FULL,
424 VMX_VMCS64_GUEST_PAT_HIGH,
425 VMX_VMCS64_GUEST_EFER_FULL,
426 VMX_VMCS64_GUEST_EFER_HIGH,
427 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
428 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
429 VMX_VMCS64_GUEST_PDPTE0_FULL,
430 VMX_VMCS64_GUEST_PDPTE0_HIGH,
431 VMX_VMCS64_GUEST_PDPTE1_FULL,
432 VMX_VMCS64_GUEST_PDPTE1_HIGH,
433 VMX_VMCS64_GUEST_PDPTE2_FULL,
434 VMX_VMCS64_GUEST_PDPTE2_HIGH,
435 VMX_VMCS64_GUEST_PDPTE3_FULL,
436 VMX_VMCS64_GUEST_PDPTE3_HIGH,
437 VMX_VMCS64_GUEST_BNDCFGS_FULL,
438 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
439 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
440 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
441 VMX_VMCS64_GUEST_PKRS_FULL,
442 VMX_VMCS64_GUEST_PKRS_HIGH,
443
444 /* 64-bit host-state fields. */
445 VMX_VMCS64_HOST_PAT_FULL,
446 VMX_VMCS64_HOST_PAT_HIGH,
447 VMX_VMCS64_HOST_EFER_FULL,
448 VMX_VMCS64_HOST_EFER_HIGH,
449 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
450 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
451 VMX_VMCS64_HOST_PKRS_FULL,
452 VMX_VMCS64_HOST_PKRS_HIGH,
453
454 /* 32-bit control fields. */
455 VMX_VMCS32_CTRL_PIN_EXEC,
456 VMX_VMCS32_CTRL_PROC_EXEC,
457 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
458 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
459 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
460 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
461 VMX_VMCS32_CTRL_EXIT,
462 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
463 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
464 VMX_VMCS32_CTRL_ENTRY,
465 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
466 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
467 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
468 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
469 VMX_VMCS32_CTRL_TPR_THRESHOLD,
470 VMX_VMCS32_CTRL_PROC_EXEC2,
471 VMX_VMCS32_CTRL_PLE_GAP,
472 VMX_VMCS32_CTRL_PLE_WINDOW,
473
474 /* 32-bit read-only fields. */
475 VMX_VMCS32_RO_VM_INSTR_ERROR,
476 VMX_VMCS32_RO_EXIT_REASON,
477 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
478 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
479 VMX_VMCS32_RO_IDT_VECTORING_INFO,
480 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
481 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
482 VMX_VMCS32_RO_EXIT_INSTR_INFO,
483
484 /* 32-bit guest-state fields. */
485 VMX_VMCS32_GUEST_ES_LIMIT,
486 VMX_VMCS32_GUEST_CS_LIMIT,
487 VMX_VMCS32_GUEST_SS_LIMIT,
488 VMX_VMCS32_GUEST_DS_LIMIT,
489 VMX_VMCS32_GUEST_FS_LIMIT,
490 VMX_VMCS32_GUEST_GS_LIMIT,
491 VMX_VMCS32_GUEST_LDTR_LIMIT,
492 VMX_VMCS32_GUEST_TR_LIMIT,
493 VMX_VMCS32_GUEST_GDTR_LIMIT,
494 VMX_VMCS32_GUEST_IDTR_LIMIT,
495 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
496 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
497 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
498 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
499 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
500 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
501 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
502 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
503 VMX_VMCS32_GUEST_INT_STATE,
504 VMX_VMCS32_GUEST_ACTIVITY_STATE,
505 VMX_VMCS32_GUEST_SMBASE,
506 VMX_VMCS32_GUEST_SYSENTER_CS,
507 VMX_VMCS32_PREEMPT_TIMER_VALUE,
508
509 /* 32-bit host-state fields. */
510 VMX_VMCS32_HOST_SYSENTER_CS,
511
512 /* Natural-width control fields. */
513 VMX_VMCS_CTRL_CR0_MASK,
514 VMX_VMCS_CTRL_CR4_MASK,
515 VMX_VMCS_CTRL_CR0_READ_SHADOW,
516 VMX_VMCS_CTRL_CR4_READ_SHADOW,
517 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
518 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
519 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
520 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
521
522 /* Natural-width read-only data fields. */
523 VMX_VMCS_RO_EXIT_QUALIFICATION,
524 VMX_VMCS_RO_IO_RCX,
525 VMX_VMCS_RO_IO_RSI,
526 VMX_VMCS_RO_IO_RDI,
527 VMX_VMCS_RO_IO_RIP,
528 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
529
530 /* Natural-width guest-state fields. */
531 VMX_VMCS_GUEST_CR0,
532 VMX_VMCS_GUEST_CR3,
533 VMX_VMCS_GUEST_CR4,
534 VMX_VMCS_GUEST_ES_BASE,
535 VMX_VMCS_GUEST_CS_BASE,
536 VMX_VMCS_GUEST_SS_BASE,
537 VMX_VMCS_GUEST_DS_BASE,
538 VMX_VMCS_GUEST_FS_BASE,
539 VMX_VMCS_GUEST_GS_BASE,
540 VMX_VMCS_GUEST_LDTR_BASE,
541 VMX_VMCS_GUEST_TR_BASE,
542 VMX_VMCS_GUEST_GDTR_BASE,
543 VMX_VMCS_GUEST_IDTR_BASE,
544 VMX_VMCS_GUEST_DR7,
545 VMX_VMCS_GUEST_RSP,
546 VMX_VMCS_GUEST_RIP,
547 VMX_VMCS_GUEST_RFLAGS,
548 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
549 VMX_VMCS_GUEST_SYSENTER_ESP,
550 VMX_VMCS_GUEST_SYSENTER_EIP,
551 VMX_VMCS_GUEST_S_CET,
552 VMX_VMCS_GUEST_SSP,
553 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
554
555 /* Natural-width host-state fields. */
556 VMX_VMCS_HOST_CR0,
557 VMX_VMCS_HOST_CR3,
558 VMX_VMCS_HOST_CR4,
559 VMX_VMCS_HOST_FS_BASE,
560 VMX_VMCS_HOST_GS_BASE,
561 VMX_VMCS_HOST_TR_BASE,
562 VMX_VMCS_HOST_GDTR_BASE,
563 VMX_VMCS_HOST_IDTR_BASE,
564 VMX_VMCS_HOST_SYSENTER_ESP,
565 VMX_VMCS_HOST_SYSENTER_EIP,
566 VMX_VMCS_HOST_RSP,
567 VMX_VMCS_HOST_RIP,
568 VMX_VMCS_HOST_S_CET,
569 VMX_VMCS_HOST_SSP,
570 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
571};
572#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
573
574#ifdef VBOX_STRICT
575static const uint32_t g_aVmcsSegBase[] =
576{
577 VMX_VMCS_GUEST_ES_BASE,
578 VMX_VMCS_GUEST_CS_BASE,
579 VMX_VMCS_GUEST_SS_BASE,
580 VMX_VMCS_GUEST_DS_BASE,
581 VMX_VMCS_GUEST_FS_BASE,
582 VMX_VMCS_GUEST_GS_BASE
583};
584static const uint32_t g_aVmcsSegSel[] =
585{
586 VMX_VMCS16_GUEST_ES_SEL,
587 VMX_VMCS16_GUEST_CS_SEL,
588 VMX_VMCS16_GUEST_SS_SEL,
589 VMX_VMCS16_GUEST_DS_SEL,
590 VMX_VMCS16_GUEST_FS_SEL,
591 VMX_VMCS16_GUEST_GS_SEL
592};
593static const uint32_t g_aVmcsSegLimit[] =
594{
595 VMX_VMCS32_GUEST_ES_LIMIT,
596 VMX_VMCS32_GUEST_CS_LIMIT,
597 VMX_VMCS32_GUEST_SS_LIMIT,
598 VMX_VMCS32_GUEST_DS_LIMIT,
599 VMX_VMCS32_GUEST_FS_LIMIT,
600 VMX_VMCS32_GUEST_GS_LIMIT
601};
602static const uint32_t g_aVmcsSegAttr[] =
603{
604 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
605 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
606 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
607 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
608 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
609 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS
610};
611AssertCompile(RT_ELEMENTS(g_aVmcsSegSel) == X86_SREG_COUNT);
612AssertCompile(RT_ELEMENTS(g_aVmcsSegLimit) == X86_SREG_COUNT);
613AssertCompile(RT_ELEMENTS(g_aVmcsSegBase) == X86_SREG_COUNT);
614AssertCompile(RT_ELEMENTS(g_aVmcsSegAttr) == X86_SREG_COUNT);
615#endif /* VBOX_STRICT */
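/*
 * A minimal sketch of how these parallel arrays are meant to be indexed: the index is
 * an X86_SREG_XXX ordinal (ES=0 ... GS=5, matching the order of the entries), e.g.
 * reading the guest CS base (assuming the natural-width read macro applies to the
 * segment base fields):
 *
 * @code
 *     uint64_t u64Base;
 *     int rc = VMX_VMCS_READ_NW(pVCpu, g_aVmcsSegBase[X86_SREG_CS], &u64Base);
 *     AssertRC(rc);
 * @endcode
 */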
616
617#ifdef HMVMX_USE_FUNCTION_TABLE
618/**
619 * VMX_EXIT dispatch table.
620 */
621static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
622{
623 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
624 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
625 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
626 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
627 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
628 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
629 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
630 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
631 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
632 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
633 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
634 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
635 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
636 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
637 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
638 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
639 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
640 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
641 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
642#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
643 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
644 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
645 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
646 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
647 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
648 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
649 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
650 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
651 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
652#else
653 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
654 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
655 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
656 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
657 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
658 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
659 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
660 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
661 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
662#endif
663 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
664 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
665 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
666 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
667 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
668 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
669 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
670 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
671 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
672 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
673 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
674 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
675 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
676 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
677 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
678 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
679 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
680 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
681 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
682 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
683 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
684 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
685#if defined(VBOX_WITH_NESTED_HWVIRT_VMX) && defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT)
686 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
687#else
688 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
689#endif
690 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
691 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
692#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
693 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
694#else
695 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
696#endif
697 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
698 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
699 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
700 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
701 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
702 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
703 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
704 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
705 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
706 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
707 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
708 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
709 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
710 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
711 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
712 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
713};
714#endif /* HMVMX_USE_FUNCTION_TABLE */
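/*
 * A rough sketch of how the dispatcher consumes this table when HMVMX_USE_FUNCTION_TABLE
 * is defined (the actual dispatch code appears later in this template); the basic exit
 * reason is assumed to have been range-checked against VMX_EXIT_MAX first:
 *
 * @code
 *     Assert(pVmxTransient->uExitReason <= VMX_EXIT_MAX);
 *     VBOXSTRICTRC rcStrict = g_aVMExitHandlers[pVmxTransient->uExitReason].pfn(pVCpu, pVmxTransient);
 * @endcode
 */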
715
716#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
717static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
718{
719 /* 0 */ "(Not Used)",
720 /* 1 */ "VMCALL executed in VMX root operation.",
721 /* 2 */ "VMCLEAR with invalid physical address.",
722 /* 3 */ "VMCLEAR with VMXON pointer.",
723 /* 4 */ "VMLAUNCH with non-clear VMCS.",
724 /* 5 */ "VMRESUME with non-launched VMCS.",
725 /* 6 */ "VMRESUME after VMXOFF",
726 /* 7 */ "VM-entry with invalid control fields.",
727 /* 8 */ "VM-entry with invalid host state fields.",
728 /* 9 */ "VMPTRLD with invalid physical address.",
729 /* 10 */ "VMPTRLD with VMXON pointer.",
730 /* 11 */ "VMPTRLD with incorrect revision identifier.",
731 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
732 /* 13 */ "VMWRITE to read-only VMCS component.",
733 /* 14 */ "(Not Used)",
734 /* 15 */ "VMXON executed in VMX root operation.",
735 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
736 /* 17 */ "VM-entry with non-launched executing VMCS.",
737 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
738 /* 19 */ "VMCALL with non-clear VMCS.",
739 /* 20 */ "VMCALL with invalid VM-exit control fields.",
740 /* 21 */ "(Not Used)",
741 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
742 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
743 /* 24 */ "VMCALL with invalid SMM-monitor features.",
744 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
745 /* 26 */ "VM-entry with events blocked by MOV SS.",
746 /* 27 */ "(Not Used)",
747 /* 28 */ "Invalid operand to INVEPT/INVVPID."
748};
749#endif /* VBOX_STRICT && LOG_ENABLED */
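/*
 * A minimal sketch of the intended lookup: after a failed VMX instruction, read the
 * VM-instruction error field and clamp the index before consulting the table:
 *
 * @code
 *     uint32_t uInstrError;
 *     int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_VM_INSTR_ERROR, &uInstrError);
 *     AssertRC(rc);
 *     if (uInstrError <= HMVMX_INSTR_ERROR_MAX)
 *         Log4(("VM-instruction error %u: %s\n", uInstrError, g_apszVmxInstrErrors[uInstrError]));
 * @endcode
 */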
750
751
752/**
753 * Gets the CR0 guest/host mask.
754 *
755 * These bits typically do not change through the lifetime of a VM. Any bit set in
756 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
757 * by the guest.
758 *
759 * @returns The CR0 guest/host mask.
760 * @param pVCpu The cross context virtual CPU structure.
761 */
762static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
763{
764 /*
765 * Modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW) and
766 * to CR0 bits that we require for shadow paging (PG) by the guest must cause VM-exits.
767 *
768 * Furthermore, modifications to any bits that are reserved/unspecified currently
769 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
770 * when future CPUs specify and use currently reserved/unspecified bits.
771 */
772 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
773 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
774 * and @bugref{6944}. */
775 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
776 return ( X86_CR0_PE
777 | X86_CR0_NE
778 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
779 | X86_CR0_PG
780 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
781}
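/*
 * A minimal sketch of how the mask is applied (assuming the natural-width write macro
 * and that the export path is responsible for committing it to the CR0 guest/host mask
 * field listed in g_aVmcsFields above):
 *
 * @code
 *     uint64_t const fCr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
 *     int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);
 *     AssertRC(rc);
 * @endcode
 */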
782
783
784/**
785 * Gets the CR4 guest/host mask.
786 *
787 * These bits typically do not change through the lifetime of a VM. Any bit set in
788 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
789 * by the guest.
790 *
791 * @returns The CR4 guest/host mask.
792 * @param pVCpu The cross context virtual CPU structure.
793 */
794static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
795{
796 /*
797 * We construct a mask of all CR4 bits that the guest can modify without causing
798 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
799 * a VM-exit when the guest attempts to modify them when executing using
800 * hardware-assisted VMX.
801 *
802 * When a feature is not exposed to the guest (and may be present on the host),
803 * we want to intercept guest modifications to the bit so we can emulate proper
804 * behavior (e.g., #GP).
805 *
806 * Furthermore, only modifications to those bits that don't require immediate
807 * emulation are allowed. For example, PCIDE is excluded because the behavior
808 * depends on CR3 which might not always be the guest value while executing
809 * using hardware-assisted VMX.
810 */
811 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
812 bool const fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
813 bool const fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
814 bool const fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
815
816 /*
817 * Paranoia.
818 * Ensure features exposed to the guest are present on the host.
819 */
820 Assert(!fFsGsBase || pVM->cpum.ro.HostFeatures.fFsGsBase);
821 Assert(!fXSaveRstor || pVM->cpum.ro.HostFeatures.fXSaveRstor);
822 Assert(!fFxSaveRstor || pVM->cpum.ro.HostFeatures.fFxSaveRstor);
823
824 uint64_t const fGstMask = ( X86_CR4_PVI
825 | X86_CR4_TSD
826 | X86_CR4_DE
827 | X86_CR4_MCE
828 | X86_CR4_PCE
829 | X86_CR4_OSXMMEEXCPT
830 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
831 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
832 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0));
833 return ~fGstMask;
834}
835
836
837/**
838 * Adds one or more exceptions to the exception bitmap and commits it to the current
839 * VMCS.
840 *
841 * @param pVCpu The cross context virtual CPU structure.
842 * @param pVmxTransient The VMX-transient structure.
843 * @param uXcptMask The exception(s) to add.
844 */
845static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
846{
847 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
848 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
849 if ((uXcptBitmap & uXcptMask) != uXcptMask)
850 {
851 uXcptBitmap |= uXcptMask;
852 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
853 AssertRC(rc);
854 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
855 }
856}
857
858
859/**
860 * Adds an exception to the exception bitmap and commits it to the current VMCS.
861 *
862 * @param pVCpu The cross context virtual CPU structure.
863 * @param pVmxTransient The VMX-transient structure.
864 * @param uXcpt The exception to add.
865 */
866static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
867{
868 Assert(uXcpt <= X86_XCPT_LAST);
869 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
870}
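/*
 * Usage sketch: a caller that wants to start trapping guest \#GP faults would simply do
 * the following; the mask variant above is used when several exceptions change at once:
 *
 * @code
 *     vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 * @endcode
 */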
871
872
873/**
874 * Removes one or more exceptions from the exception bitmap and commits it to the
875 * current VMCS.
876 *
877 * This takes care of not removing the exception intercept if a nested-guest
878 * requires the exception to be intercepted.
879 *
880 * @returns VBox status code.
881 * @param pVCpu The cross context virtual CPU structure.
882 * @param pVmxTransient The VMX-transient structure.
883 * @param uXcptMask The exception(s) to remove.
884 */
885static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
886{
887 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
888 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
889 if (u32XcptBitmap & uXcptMask)
890 {
891#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
892 if (!pVmxTransient->fIsNestedGuest)
893 { /* likely */ }
894 else
895 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
896#endif
897#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
898 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
899 | RT_BIT(X86_XCPT_DE)
900 | RT_BIT(X86_XCPT_NM)
901 | RT_BIT(X86_XCPT_TS)
902 | RT_BIT(X86_XCPT_UD)
903 | RT_BIT(X86_XCPT_NP)
904 | RT_BIT(X86_XCPT_SS)
905 | RT_BIT(X86_XCPT_GP)
906 | RT_BIT(X86_XCPT_PF)
907 | RT_BIT(X86_XCPT_MF));
908#elif defined(HMVMX_ALWAYS_TRAP_PF)
909 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
910#endif
911 if (uXcptMask)
912 {
913 /* Validate we are not removing any essential exception intercepts. */
914#ifndef IN_NEM_DARWIN
915 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
916#else
917 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
918#endif
919 NOREF(pVCpu);
920 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
921 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
922
923 /* Remove it from the exception bitmap. */
924 u32XcptBitmap &= ~uXcptMask;
925
926 /* Commit and update the cache if necessary. */
927 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
928 {
929 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
930 AssertRC(rc);
931 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
932 }
933 }
934 }
935 return VINF_SUCCESS;
936}
937
938
939/**
940 * Removes an exception from the exception bitmap and commits it to the current
941 * VMCS.
942 *
943 * @returns VBox status code.
944 * @param pVCpu The cross context virtual CPU structure.
945 * @param pVmxTransient The VMX-transient structure.
946 * @param uXcpt The exception to remove.
947 */
948static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
949{
950 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
951}
952
953
954#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
955/**
956 * Loads the shadow VMCS specified by the VMCS info. object.
957 *
958 * @returns VBox status code.
959 * @param pVmcsInfo The VMCS info. object.
960 *
961 * @remarks Can be called with interrupts disabled.
962 */
963static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
964{
965 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
966 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
967
968 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
969 if (RT_SUCCESS(rc))
970 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
971 return rc;
972}
973
974
975/**
976 * Clears the shadow VMCS specified by the VMCS info. object.
977 *
978 * @returns VBox status code.
979 * @param pVmcsInfo The VMCS info. object.
980 *
981 * @remarks Can be called with interrupts disabled.
982 */
983static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
984{
985 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
986 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
987
988 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
989 if (RT_SUCCESS(rc))
990 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
991 return rc;
992}
993
994
995/**
996 * Switches from and to the specified VMCSes.
997 *
998 * @returns VBox status code.
999 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
1000 * @param pVmcsInfoTo The VMCS info. object we are switching to.
1001 *
1002 * @remarks Called with interrupts disabled.
1003 */
1004static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
1005{
1006 /*
1007 * Clear the VMCS we are switching out if it has not already been cleared.
1008 * This will sync any CPU internal data back to the VMCS.
1009 */
1010 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1011 {
1012 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
1013 if (RT_SUCCESS(rc))
1014 {
1015 /*
1016 * The shadow VMCS, if any, would not be active at this point since we
1017 * would have cleared it while importing the virtual hardware-virtualization
1018 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
1019 * clear the shadow VMCS here, just assert for safety.
1020 */
1021 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
1022 }
1023 else
1024 return rc;
1025 }
1026
1027 /*
1028 * Clear the VMCS we are switching to if it has not already been cleared.
1029 * This will initialize the VMCS launch state to "clear", which is required for loading it.
1030 *
1031 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1032 */
1033 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1034 {
1035 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1036 if (RT_SUCCESS(rc))
1037 { /* likely */ }
1038 else
1039 return rc;
1040 }
1041
1042 /*
1043 * Finally, load the VMCS we are switching to.
1044 */
1045 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1046}
1047
1048
1049/**
1050 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1051 * caller.
1052 *
1053 * @returns VBox status code.
1054 * @param pVCpu The cross context virtual CPU structure.
1055 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1056 * true) or guest VMCS (pass false).
1057 */
1058static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1059{
1060 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1061 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1062
1063 PVMXVMCSINFO pVmcsInfoFrom;
1064 PVMXVMCSINFO pVmcsInfoTo;
1065 if (fSwitchToNstGstVmcs)
1066 {
1067 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1068 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1069 }
1070 else
1071 {
1072 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1073 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1074 }
1075
1076 /*
1077 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1078 * preemption hook code path acquires the current VMCS.
1079 */
1080 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1081
1082 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1083 if (RT_SUCCESS(rc))
1084 {
1085 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1086 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1087
1088 /*
1089 * If we are switching to a VMCS that was executed on a different host CPU or was
1090 * never executed before, flag that we need to export the host state before executing
1091 * guest/nested-guest code using hardware-assisted VMX.
1092 *
1093 * This could probably be done in a preemptible context since the preemption hook
1094 * will flag the necessary change in host context. However, since preemption is
1095 * already disabled and to avoid making assumptions about host specific code in
1096 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1097 * disabled.
1098 */
1099 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1100 { /* likely */ }
1101 else
1102 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1103
1104 ASMSetFlags(fEFlags);
1105
1106 /*
1107 * We use different VM-exit MSR-store areas for the guest and the nested-guest. Hence,
1108 * flag that we need to update the host MSR values there. Even if we decide in the
1109 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1110 * if its content differs, we would have to update the host MSRs anyway.
1111 */
1112 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1113 }
1114 else
1115 ASMSetFlags(fEFlags);
1116 return rc;
1117}
1118#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1119
1120
1121#ifdef VBOX_STRICT
1122/**
1123 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1124 * transient structure.
1125 *
1126 * @param pVCpu The cross context virtual CPU structure.
1127 * @param pVmxTransient The VMX-transient structure.
1128 */
1129DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1130{
1131 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1132 AssertRC(rc);
1133}
1134
1135
1136/**
1137 * Reads the VM-entry exception error code field from the VMCS into
1138 * the VMX transient structure.
1139 *
1140 * @param pVCpu The cross context virtual CPU structure.
1141 * @param pVmxTransient The VMX-transient structure.
1142 */
1143DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1144{
1145 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1146 AssertRC(rc);
1147}
1148
1149
1150/**
1151 * Reads the VM-entry instruction length field from the VMCS into
1152 * the VMX transient structure.
1153 *
1154 * @param pVCpu The cross context virtual CPU structure.
1155 * @param pVmxTransient The VMX-transient structure.
1156 */
1157DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1158{
1159 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1160 AssertRC(rc);
1161}
1162#endif /* VBOX_STRICT */
1163
1164
1165/**
1166 * Reads the VM-exit interruption-information field from the VMCS into the VMX
1167 * transient structure.
1168 *
1169 * @param pVCpu The cross context virtual CPU structure.
1170 * @param pVmxTransient The VMX-transient structure.
1171 */
1172DECLINLINE(void) vmxHCReadExitIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1173{
1174 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1175 {
1176 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1177 AssertRC(rc);
1178 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO;
1179 }
1180}
1181
1182
1183/**
1184 * Reads the VM-exit interruption error code from the VMCS into the VMX
1185 * transient structure.
1186 *
1187 * @param pVCpu The cross context virtual CPU structure.
1188 * @param pVmxTransient The VMX-transient structure.
1189 */
1190DECLINLINE(void) vmxHCReadExitIntErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1191{
1192 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1193 {
1194 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1195 AssertRC(rc);
1196 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE;
1197 }
1198}
1199
1200
1201/**
1202 * Reads the VM-exit instruction length field from the VMCS into the VMX
1203 * transient structure.
1204 *
1205 * @param pVCpu The cross context virtual CPU structure.
1206 * @param pVmxTransient The VMX-transient structure.
1207 */
1208DECLINLINE(void) vmxHCReadExitInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1209{
1210 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1211 {
1212 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1213 AssertRC(rc);
1214 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
1215 }
1216}
1217
1218
1219/**
1220 * Reads the VM-exit instruction-information field from the VMCS into
1221 * the VMX transient structure.
1222 *
1223 * @param pVCpu The cross context virtual CPU structure.
1224 * @param pVmxTransient The VMX-transient structure.
1225 */
1226DECLINLINE(void) vmxHCReadExitInstrInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1227{
1228 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1229 {
1230 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1231 AssertRC(rc);
1232 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO;
1233 }
1234}
1235
1236
1237/**
1238 * Reads the Exit Qualification from the VMCS into the VMX transient structure.
1239 *
1240 * @param pVCpu The cross context virtual CPU structure.
1241 * @param pVmxTransient The VMX-transient structure.
1242 */
1243DECLINLINE(void) vmxHCReadExitQualVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1244{
1245 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1246 {
1247 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1248 AssertRC(rc);
1249 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
1250 }
1251}
1252
1253
1254/**
1255 * Reads the Guest-linear address from the VMCS into the VMX transient structure.
1256 *
1257 * @param pVCpu The cross context virtual CPU structure.
1258 * @param pVmxTransient The VMX-transient structure.
1259 */
1260DECLINLINE(void) vmxHCReadGuestLinearAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1261{
1262 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1263 {
1264 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1265 AssertRC(rc);
1266 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_LINEAR_ADDR;
1267 }
1268}
1269
1270
1271/**
1272 * Reads the Guest-physical address from the VMCS into the VMX transient structure.
1273 *
1274 * @param pVCpu The cross context virtual CPU structure.
1275 * @param pVmxTransient The VMX-transient structure.
1276 */
1277DECLINLINE(void) vmxHCReadGuestPhysicalAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1278{
1279 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1280 {
1281 int rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1282 AssertRC(rc);
1283 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PHYSICAL_ADDR;
1284 }
1285}
1286
1287#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1288/**
1289 * Reads the Guest pending-debug exceptions from the VMCS into the VMX transient
1290 * structure.
1291 *
1292 * @param pVCpu The cross context virtual CPU structure.
1293 * @param pVmxTransient The VMX-transient structure.
1294 */
1295DECLINLINE(void) vmxHCReadGuestPendingDbgXctps(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1296{
1297 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1298 {
1299 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1300 AssertRC(rc);
1301 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PENDING_DBG_XCPTS;
1302 }
1303}
1304#endif
1305
1306/**
1307 * Reads the IDT-vectoring information field from the VMCS into the VMX
1308 * transient structure.
1309 *
1310 * @param pVCpu The cross context virtual CPU structure.
1311 * @param pVmxTransient The VMX-transient structure.
1312 *
1313 * @remarks No-long-jump zone!!!
1314 */
1315DECLINLINE(void) vmxHCReadIdtVectoringInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1316{
1317 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1318 {
1319 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1320 AssertRC(rc);
1321 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO;
1322 }
1323}
1324
1325
1326/**
1327 * Reads the IDT-vectoring error code from the VMCS into the VMX
1328 * transient structure.
1329 *
1330 * @param pVCpu The cross context virtual CPU structure.
1331 * @param pVmxTransient The VMX-transient structure.
1332 */
1333DECLINLINE(void) vmxHCReadIdtVectoringErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1334{
1335 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1336 {
1337 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1338 AssertRC(rc);
1339 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE;
1340 }
1341}
1342
1343#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1344/**
1345 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1346 *
1347 * @param pVCpu The cross context virtual CPU structure.
1348 * @param pVmxTransient The VMX-transient structure.
1349 */
1350static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1351{
1352 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1353 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1354 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1355 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1356 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1357 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1358 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1359 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1360 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1361 AssertRC(rc);
1362 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1363 | HMVMX_READ_EXIT_INSTR_LEN
1364 | HMVMX_READ_EXIT_INSTR_INFO
1365 | HMVMX_READ_IDT_VECTORING_INFO
1366 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1367 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1368 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1369 | HMVMX_READ_GUEST_LINEAR_ADDR
1370 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1371}
1372#endif
1373
1374/**
1375 * Verifies that our cached values of the VMCS fields are all consistent with
1376 * what's actually present in the VMCS.
1377 *
1378 * @returns VBox status code.
1379 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1380 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1381 * VMCS content. HMCPU error-field is
1382 * updated, see VMX_VCI_XXX.
1383 * @param pVCpu The cross context virtual CPU structure.
1384 * @param pVmcsInfo The VMCS info. object.
1385 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1386 */
1387static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1388{
1389 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1390
1391 uint32_t u32Val;
1392 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1393 AssertRC(rc);
1394 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1395 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1396 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1397 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1398
1399 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1400 AssertRC(rc);
1401 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1402 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1403 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1404 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1405
1406 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1407 AssertRC(rc);
1408 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1409 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1410 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1411 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1412
1413 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1414 AssertRC(rc);
1415 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1416 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1417 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1418 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1419
1420 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1421 {
1422 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1423 AssertRC(rc);
1424 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1425 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1426 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1427 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1428 }
1429
1430 uint64_t u64Val;
1431 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1432 {
1433 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1434 AssertRC(rc);
1435 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1436 ("%s proc3 controls mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1437 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1438 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1439 }
1440
1441 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1442 AssertRC(rc);
1443 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1444 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1445 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1446 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1447
1448 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1449 AssertRC(rc);
1450 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1451 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1452 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1453 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1454
1455 NOREF(pcszVmcs);
1456 return VINF_SUCCESS;
1457}
1458
1459
1460/**
1461 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1462 * VMCS.
1463 *
1464 * This is typically required when the guest changes paging mode.
1465 *
1466 * @returns VBox status code.
1467 * @param pVCpu The cross context virtual CPU structure.
1468 * @param pVmxTransient The VMX-transient structure.
1469 *
1470 * @remarks Requires EFER.
1471 * @remarks No-long-jump zone!!!
1472 */
1473static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1474{
1475 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1476 {
1477 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1478 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1479
1480 /*
1481 * VM-entry controls.
1482 */
1483 {
1484 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1485 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1486
1487 /*
1488 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1489 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1490 *
1491 * For nested-guests, this is a mandatory VM-entry control. It's also
1492 * required because we do not want to leak host bits to the nested-guest.
1493 */
1494 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1495
1496 /*
1497 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1498 *
1499 * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1500 * required to get the nested-guest working with hardware-assisted VMX execution.
1501 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1502 * can skip intercepting changes to the EFER MSR, which is why this needs to be done
1503 * here rather than while merging the guest VMCS controls.
1504 */
1505 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1506 {
1507 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1508 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1509 }
1510 else
1511 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1512
1513 /*
1514 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use them.
1515 *
1516 * For nested-guests, we use the "load IA32_EFER" control if the hardware supports it,
1517 * regardless of whether the nested-guest VMCS specifies it, because we are free to
1518 * load whatever MSRs we require and we do not need to modify the guest-visible copy
1519 * of the VM-entry MSR load area.
1520 */
1521 if ( g_fHmVmxSupportsVmcsEfer
1522#ifndef IN_NEM_DARWIN
1523 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1524#endif
1525 )
1526 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1527 else
1528 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1529
1530 /*
1531 * The following should -not- be set (since we're not in SMM mode):
1532 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1533 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1534 */
1535
1536 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1537 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1538
1539 if ((fVal & fZap) == fVal)
1540 { /* likely */ }
1541 else
1542 {
1543 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1544 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1545 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1546 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1547 }
1548
1549 /* Commit it to the VMCS. */
1550 if (pVmcsInfo->u32EntryCtls != fVal)
1551 {
1552 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1553 AssertRC(rc);
1554 pVmcsInfo->u32EntryCtls = fVal;
1555 }
1556 }
1557
1558 /*
1559 * VM-exit controls.
1560 */
1561 {
1562 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1563 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1564
1565 /*
1566 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1567 * supported the 1-setting of this bit.
1568 *
1569 * For nested-guests, we set the "save debug controls" as the converse
1570 * "load debug controls" is mandatory for nested-guests anyway.
1571 */
1572 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1573
1574 /*
1575 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1576 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1577 * host EFER.LMA and EFER.LME bits to this value. See assertion in
1578 * vmxHCExportHostMsrs().
1579 *
1580 * For nested-guests, we always set this bit as we do not support 32-bit
1581 * hosts.
1582 */
1583 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1584
1585#ifndef IN_NEM_DARWIN
1586 /*
1587 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1588 *
1589 * For nested-guests, we should use the "save IA32_EFER" control if we also
1590 * used the "load IA32_EFER" control while exporting VM-entry controls.
1591 */
1592 if ( g_fHmVmxSupportsVmcsEfer
1593 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1594 {
1595 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1596 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1597 }
1598#endif
1599
1600 /*
1601 * Enable saving of the VMX-preemption timer value on VM-exit.
1602 * For nested-guests, currently not exposed/used.
1603 */
1604 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1605 * the timer value. */
1606 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1607 {
1608 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1609 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1610 }
1611
1612 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1613 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1614
1615 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1616 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1617 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1618
1619 if ((fVal & fZap) == fVal)
1620 { /* likely */ }
1621 else
1622 {
1623 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1624 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1625 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1626 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1627 }
1628
1629 /* Commit it to the VMCS. */
1630 if (pVmcsInfo->u32ExitCtls != fVal)
1631 {
1632 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1633 AssertRC(rc);
1634 pVmcsInfo->u32ExitCtls = fVal;
1635 }
1636 }
1637
1638 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1639 }
1640 return VINF_SUCCESS;
1641}
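
/*
 * Illustrative sketch (not part of the build): how a VMX control field is derived from its
 * capability MSR pair, mirroring the allowed0/allowed1 handling above. The helper name and
 * parameters are hypothetical and exist only for this example; allowed0 holds the
 * must-be-one bits and allowed1 the may-be-one bits.
 */
#if 0
static int exampleBuildVmxCtls(uint32_t fAllowed0, uint32_t fAllowed1, uint32_t fDesired, uint32_t *pfResult)
{
    uint32_t fVal = fAllowed0;          /* Bits that must be set in the VMCS. */
    fVal |= fDesired;                   /* Features we would like to enable. */

    /* Every bit we set must also be allowed to be 1 by the capability MSR. */
    if ((fVal & fAllowed1) != fVal)
        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;

    *pfResult = fVal;
    return VINF_SUCCESS;
}
#endif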
1642
1643
1644/**
1645 * Sets the TPR threshold in the VMCS.
1646 *
1647 * @param pVCpu The cross context virtual CPU structure.
1648 * @param pVmcsInfo The VMCS info. object.
1649 * @param u32TprThreshold The TPR threshold (task-priority class only).
1650 */
1651DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1652{
1653 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1654 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1655 RT_NOREF(pVmcsInfo);
1656 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1657 AssertRC(rc);
1658}
1659
1660
1661/**
1662 * Exports the guest APIC TPR state into the VMCS.
1663 *
1664 * @param pVCpu The cross context virtual CPU structure.
1665 * @param pVmxTransient The VMX-transient structure.
1666 *
1667 * @remarks No-long-jump zone!!!
1668 */
1669static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1670{
1671 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1672 {
1673 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1674
1675 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1676 if (!pVmxTransient->fIsNestedGuest)
1677 {
1678 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1679 && APICIsEnabled(pVCpu))
1680 {
1681 /*
1682 * Setup TPR shadowing.
1683 */
1684 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1685 {
1686 bool fPendingIntr = false;
1687 uint8_t u8Tpr = 0;
1688 uint8_t u8PendingIntr = 0;
1689 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1690 AssertRC(rc);
1691
1692 /*
1693 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1694 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1695 * priority of the pending interrupt so we can deliver the interrupt. If there
1696 * are no interrupts pending, set threshold to 0 to not cause any
1697 * TPR-below-threshold VM-exits.
1698 */
1699 uint32_t u32TprThreshold = 0;
1700 if (fPendingIntr)
1701 {
1702 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1703 (which is the Task-Priority Class). */
1704 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1705 const uint8_t u8TprPriority = u8Tpr >> 4;
1706 if (u8PendingPriority <= u8TprPriority)
1707 u32TprThreshold = u8PendingPriority;
1708 }
1709
1710 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1711 }
1712 }
1713 }
1714 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1715 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1716 }
1717}
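
/*
 * Illustrative sketch (not part of the build): the TPR-threshold computation done above,
 * isolated for clarity. The helper name is hypothetical; the inputs mirror what
 * APICGetTpr() returns (the 8-bit TPR, whether an interrupt is pending and its vector).
 */
#if 0
static uint32_t exampleComputeTprThreshold(uint8_t u8Tpr, bool fPendingIntr, uint8_t u8PendingIntr)
{
    if (fPendingIntr)
    {
        /* Only bits 7:4 (the task-priority class) matter here. */
        uint8_t const u8PendingPriority = u8PendingIntr >> 4;
        uint8_t const u8TprPriority     = u8Tpr >> 4;

        /* The pending interrupt is masked by the TPR: program the threshold so that
           lowering the TPR below it causes a TPR-below-threshold VM-exit. */
        if (u8PendingPriority <= u8TprPriority)
            return u8PendingPriority;
    }
    return 0; /* Nothing pending (or not masked): never cause TPR-below-threshold VM-exits. */
}
#endif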
1718
1719
1720/**
1721 * Gets the guest interruptibility-state and updates related force-flags.
1722 *
1723 * @returns Guest's interruptibility-state.
1724 * @param pVCpu The cross context virtual CPU structure.
1725 *
1726 * @remarks No-long-jump zone!!!
1727 */
1728static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1729{
1730 /*
1731 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1732 */
1733 uint32_t fIntrState = 0;
1734 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1735 {
1736 /* If inhibition is active, RIP and RFLAGS should've been imported from the VMCS already. */
1737 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
1738
1739 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1740 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
1741 {
1742 if (pCtx->eflags.Bits.u1IF)
1743 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1744 else
1745 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1746 }
1747 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1748 {
1749 /*
1750 * We can clear the inhibit force-flag because even if we go back to the recompiler
1751 * without executing guest code in VT-x, the condition for clearing the flag has
1752 * been met and thus the cleared state is correct.
1753 */
1754 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1755 }
1756 }
1757
1758 /*
1759 * Check if we should inhibit NMI delivery.
1760 */
1761 if (CPUMIsGuestNmiBlocking(pVCpu))
1762 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1763
1764 /*
1765 * Validate.
1766 */
1767#ifdef VBOX_STRICT
1768 /* We don't support block-by-SMI yet. */
1769 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1770
1771 /* Block-by-STI must not be set when interrupts are disabled. */
1772 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
1773 {
1774 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1775 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
1776 }
1777#endif
1778
1779 return fIntrState;
1780}
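
/*
 * Illustrative sketch (not part of the build): how the kind of interrupt shadow is chosen
 * above. The helper name is hypothetical; it captures only the RIP/IF decision, not the
 * force-flag bookkeeping or NMI blocking.
 */
#if 0
static uint32_t exampleGetIntrShadow(uint64_t uRip, uint64_t uInhibitRip, bool fEflIf)
{
    if (uRip == uInhibitRip)
    {
        /* RIP still matches the recorded inhibit PC: report STI blocking when interrupts
           are enabled, MOV SS/POP SS blocking otherwise (mirroring the check above). */
        return fEflIf ? VMX_VMCS_GUEST_INT_STATE_BLOCK_STI
                      : VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
    }
    return 0; /* RIP has moved on; the inhibition no longer applies. */
}
#endif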
1781
1782
1783/**
1784 * Exports the exception intercepts required for guest execution in the VMCS.
1785 *
1786 * @param pVCpu The cross context virtual CPU structure.
1787 * @param pVmxTransient The VMX-transient structure.
1788 *
1789 * @remarks No-long-jump zone!!!
1790 */
1791static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1792{
1793 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1794 {
1795 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1796 if ( !pVmxTransient->fIsNestedGuest
1797 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1798 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1799 else
1800 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1801
1802 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1803 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1804 }
1805}
1806
1807
1808/**
1809 * Exports the guest's RIP into the guest-state area in the VMCS.
1810 *
1811 * @param pVCpu The cross context virtual CPU structure.
1812 *
1813 * @remarks No-long-jump zone!!!
1814 */
1815static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1816{
1817 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1818 {
1819 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1820
1821 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1822 AssertRC(rc);
1823
1824 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1825 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1826 }
1827}
1828
1829
1830/**
1831 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1832 *
1833 * @param pVCpu The cross context virtual CPU structure.
1834 * @param pVmxTransient The VMX-transient structure.
1835 *
1836 * @remarks No-long-jump zone!!!
1837 */
1838static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1839{
1840 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1841 {
1842 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1843
1844 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
1845 Let us assert it as such and use 32-bit VMWRITE. */
1846 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
1847 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
1848 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
1849 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
1850
1851#ifndef IN_NEM_DARWIN
1852 /*
1853 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1854 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1855 * can run the real-mode guest code under Virtual 8086 mode.
1856 */
1857 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1858 if (pVmcsInfo->RealMode.fRealOnV86Active)
1859 {
1860 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1861 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1862 Assert(!pVmxTransient->fIsNestedGuest);
1863 pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
1864 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
1865 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1866 }
1867#else
1868 RT_NOREF(pVmxTransient);
1869#endif
1870
1871 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
1872 AssertRC(rc);
1873
1874 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1875 Log4Func(("eflags=%#RX32\n", fEFlags.u32));
1876 }
1877}
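
/*
 * Illustrative sketch (not part of the build): the real-on-v86 EFLAGS adjustment performed
 * above, expressed with plain masks instead of bit-fields. The helper name is hypothetical;
 * X86_EFL_VM / X86_EFL_IOPL are the usual x86 EFLAGS definitions.
 */
#if 0
static uint32_t exampleMakeV86Eflags(uint32_t fEFlags)
{
    fEFlags |= X86_EFL_VM;              /* Run the real-mode code under virtual-8086 mode. */
    fEFlags &= ~(uint32_t)X86_EFL_IOPL; /* IOPL 0, otherwise certain instructions won't fault. */
    return fEFlags;
}
#endif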
1878
1879
1880#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1881/**
1882 * Copies the nested-guest VMCS to the shadow VMCS.
1883 *
1884 * @returns VBox status code.
1885 * @param pVCpu The cross context virtual CPU structure.
1886 * @param pVmcsInfo The VMCS info. object.
1887 *
1888 * @remarks No-long-jump zone!!!
1889 */
1890static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1891{
1892 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1893 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1894
1895 /*
1896 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1897 * current VMCS, as we may try saving guest lazy MSRs.
1898 *
1899 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1900 * calling the import VMCS code which is currently performing the guest MSR reads
1901 * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
1902 * and the rest of the VMX leave session machinery.
1903 */
1904 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1905
1906 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1907 if (RT_SUCCESS(rc))
1908 {
1909 /*
1910 * Copy all guest read/write VMCS fields.
1911 *
1912 * We don't check for VMWRITE failures here for performance reasons and
1913 * because they are not expected to fail, barring irrecoverable conditions
1914 * like hardware errors.
1915 */
1916 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1917 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1918 {
1919 uint64_t u64Val;
1920 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1921 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1922 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1923 }
1924
1925 /*
1926 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1927 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1928 */
1929 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1930 {
1931 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1932 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1933 {
1934 uint64_t u64Val;
1935 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1936 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1937 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1938 }
1939 }
1940
1941 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1942 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1943 }
1944
1945 ASMSetFlags(fEFlags);
1946 return rc;
1947}
1948
1949
1950/**
1951 * Copies the shadow VMCS to the nested-guest VMCS.
1952 *
1953 * @returns VBox status code.
1954 * @param pVCpu The cross context virtual CPU structure.
1955 * @param pVmcsInfo The VMCS info. object.
1956 *
1957 * @remarks Called with interrupts disabled.
1958 */
1959static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1960{
1961 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1962 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1963 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1964
1965 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1966 if (RT_SUCCESS(rc))
1967 {
1968 /*
1969 * Copy guest read/write fields from the shadow VMCS.
1970 * Guest read-only fields cannot be modified, so no need to copy them.
1971 *
1972 * We don't check for VMREAD failures here for performance reasons and
1973 * because they are not expected to fail, barring irrecoverable conditions
1974 * like hardware errors.
1975 */
1976 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1977 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1978 {
1979 uint64_t u64Val;
1980 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1981 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1982 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1983 }
1984
1985 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1986 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1987 }
1988 return rc;
1989}
1990
1991
1992/**
1993 * Enables VMCS shadowing for the given VMCS info. object.
1994 *
1995 * @param pVCpu The cross context virtual CPU structure.
1996 * @param pVmcsInfo The VMCS info. object.
1997 *
1998 * @remarks No-long-jump zone!!!
1999 */
2000static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2001{
2002 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2003 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
2004 {
2005 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
2006 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
2007 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2008 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
2009 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2010 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
2011 Log4Func(("Enabled\n"));
2012 }
2013}
2014
2015
2016/**
2017 * Disables VMCS shadowing for the given VMCS info. object.
2018 *
2019 * @param pVCpu The cross context virtual CPU structure.
2020 * @param pVmcsInfo The VMCS info. object.
2021 *
2022 * @remarks No-long-jump zone!!!
2023 */
2024static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2025{
2026 /*
2027 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
2028 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
2029 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
2030 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
2031 *
2032 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2033 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2034 */
2035 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2036 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2037 {
2038 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2039 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2040 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2041 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2042 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2043 Log4Func(("Disabled\n"));
2044 }
2045}
2046#endif
2047
2048
2049/**
2050 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2051 *
2052 * The guest FPU state is always pre-loaded hence we don't need to bother about
2053 * sharing FPU related CR0 bits between the guest and host.
2054 *
2055 * @returns VBox status code.
2056 * @param pVCpu The cross context virtual CPU structure.
2057 * @param pVmxTransient The VMX-transient structure.
2058 *
2059 * @remarks No-long-jump zone!!!
2060 */
2061static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2062{
2063 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2064 {
2065 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2066 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2067
2068 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2069 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2070 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2071 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2072 else
2073 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
2074
2075 if (!pVmxTransient->fIsNestedGuest)
2076 {
2077 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2078 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2079 uint64_t const u64ShadowCr0 = u64GuestCr0;
2080 Assert(!RT_HI_U32(u64GuestCr0));
2081
2082 /*
2083 * Setup VT-x's view of the guest CR0.
2084 */
2085 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2086 if (VM_IS_VMX_NESTED_PAGING(pVM))
2087 {
2088 if (CPUMIsGuestPagingEnabled(pVCpu))
2089 {
2090 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2091 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2092 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2093 }
2094 else
2095 {
2096 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2097 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2098 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2099 }
2100
2101 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2102 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2103 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2104 }
2105 else
2106 {
2107 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2108 u64GuestCr0 |= X86_CR0_WP;
2109 }
2110
2111 /*
2112 * Guest FPU bits.
2113 *
2114 * Since we always pre-load the guest FPU before VM-entry, there is no need to track lazy state
2115 * using CR0.TS.
2116 *
2117 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2118 * set on the first CPUs to support VT-x, and says nothing about UX in the VM-entry checks.
2119 */
2120 u64GuestCr0 |= X86_CR0_NE;
2121
2122 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2123 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2124
2125 /*
2126 * Update exception intercepts.
2127 */
2128 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2129#ifndef IN_NEM_DARWIN
2130 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2131 {
2132 Assert(PDMVmmDevHeapIsEnabled(pVM));
2133 Assert(pVM->hm.s.vmx.pRealModeTSS);
2134 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2135 }
2136 else
2137#endif
2138 {
2139 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2140 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2141 if (fInterceptMF)
2142 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2143 }
2144
2145 /* Additional intercepts for debugging, define these yourself explicitly. */
2146#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2147 uXcptBitmap |= 0
2148 | RT_BIT(X86_XCPT_BP)
2149 | RT_BIT(X86_XCPT_DE)
2150 | RT_BIT(X86_XCPT_NM)
2151 | RT_BIT(X86_XCPT_TS)
2152 | RT_BIT(X86_XCPT_UD)
2153 | RT_BIT(X86_XCPT_NP)
2154 | RT_BIT(X86_XCPT_SS)
2155 | RT_BIT(X86_XCPT_GP)
2156 | RT_BIT(X86_XCPT_PF)
2157 | RT_BIT(X86_XCPT_MF)
2158 ;
2159#elif defined(HMVMX_ALWAYS_TRAP_PF)
2160 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2161#endif
2162 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2163 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2164 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2165
2166 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2167 u64GuestCr0 |= fSetCr0;
2168 u64GuestCr0 &= fZapCr0;
2169 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2170
2171 /* Commit the CR0 and related fields to the guest VMCS. */
2172 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2173 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2174 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2175 {
2176 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2177 AssertRC(rc);
2178 }
2179 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2180 {
2181 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2182 AssertRC(rc);
2183 }
2184
2185 /* Update our caches. */
2186 pVmcsInfo->u32ProcCtls = uProcCtls;
2187 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2188
2189 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2190 }
2191 else
2192 {
2193 /*
2194 * With nested-guests, we may have extended the guest/host mask here since we
2195 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2196 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2197 * originally supplied. We must copy those bits from the nested-guest CR0 into
2198 * the nested-guest CR0 read-shadow.
2199 */
2200 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2201 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2202 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2203 Assert(!RT_HI_U32(u64GuestCr0));
2204 Assert(u64GuestCr0 & X86_CR0_NE);
2205
2206 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2207 u64GuestCr0 |= fSetCr0;
2208 u64GuestCr0 &= fZapCr0;
2209 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2210
2211 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2212 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2213 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2214
2215 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2216 }
2217
2218 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2219 }
2220
2221 return VINF_SUCCESS;
2222}
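
/*
 * Illustrative sketch (not part of the build): applying the CR0 fixed-bit MSRs the way the
 * code above does. The helper name is hypothetical; fFixed0 corresponds to
 * IA32_VMX_CR0_FIXED0 (bits that must be 1) and fFixed1 to IA32_VMX_CR0_FIXED1 (bits that
 * may be 1).
 */
#if 0
static uint64_t exampleApplyCr0FixedBits(uint64_t uGuestCr0, uint64_t fFixed0, uint64_t fFixed1)
{
    uGuestCr0 |= fFixed0;                               /* Force the must-be-one bits. */
    uGuestCr0 &= fFixed1;                               /* Clear anything not allowed to be 1. */
    uGuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);  /* Always run the guest with caching enabled. */
    return uGuestCr0;
}
#endif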
2223
2224
2225/**
2226 * Exports the guest control registers (CR3, CR4) into the guest-state area
2227 * in the VMCS.
2228 *
2229 * @returns VBox strict status code.
2230 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2231 * without unrestricted guest access and the VMMDev is not presently
2232 * mapped (e.g. EFI32).
2233 *
2234 * @param pVCpu The cross context virtual CPU structure.
2235 * @param pVmxTransient The VMX-transient structure.
2236 *
2237 * @remarks No-long-jump zone!!!
2238 */
2239static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2240{
2241 int rc = VINF_SUCCESS;
2242 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2243
2244 /*
2245 * Guest CR2.
2246 * It's always loaded in the assembler code. Nothing to do here.
2247 */
2248
2249 /*
2250 * Guest CR3.
2251 */
2252 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2253 {
2254 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2255
2256 if (VM_IS_VMX_NESTED_PAGING(pVM))
2257 {
2258#ifndef IN_NEM_DARWIN
2259 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2260 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2261
2262 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2263 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2264 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2265 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2266
2267 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2268 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2269 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
2270
2271 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2272 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 5:3 (EPT page-walk length - 1) must be 3. */
2273 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 11:7 MBZ. */
2274 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2275 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2276 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2277 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2278
2279 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2280 AssertRC(rc);
2281#endif
2282
2283 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2284 uint64_t u64GuestCr3 = pCtx->cr3;
2285 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2286 || CPUMIsGuestPagingEnabledEx(pCtx))
2287 {
2288 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2289 if (CPUMIsGuestInPAEModeEx(pCtx))
2290 {
2291 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2292 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2293 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2294 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2295 }
2296
2297 /*
2298 * With nested paging, the guest's view of its CR3 is left unblemished when the
2299 * guest is using paging, or when we have unrestricted guest execution to handle
2300 * the guest while it's not using paging.
2301 */
2302 }
2303#ifndef IN_NEM_DARWIN
2304 else
2305 {
2306 /*
2307 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2308 * thinks it accesses physical memory directly, we use our identity-mapped
2309 * page table to map guest-linear to guest-physical addresses. EPT takes care
2310 * of translating it to host-physical addresses.
2311 */
2312 RTGCPHYS GCPhys;
2313 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2314
2315 /* We obtain it here every time as the guest could have relocated this PCI region. */
2316 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2317 if (RT_SUCCESS(rc))
2318 { /* likely */ }
2319 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2320 {
2321 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2322 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2323 }
2324 else
2325 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2326
2327 u64GuestCr3 = GCPhys;
2328 }
2329#endif
2330
2331 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2332 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2333 AssertRC(rc);
2334 }
2335 else
2336 {
2337 Assert(!pVmxTransient->fIsNestedGuest);
2338 /* Non-nested paging case, just use the hypervisor's CR3. */
2339 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2340
2341 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2342 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2343 AssertRC(rc);
2344 }
2345
2346 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2347 }
2348
2349 /*
2350 * Guest CR4.
2351 * ASSUMES this is done everytime we get in from ring-3! (XCR0)
2352 */
2353 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2354 {
2355 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2356 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2357
2358 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2359 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2360
2361 /*
2362 * With nested-guests, we may have extended the guest/host mask here (since we
2363 * merged in the outer guest's mask, see vmxHCMergeVmcsNested). This means, the
2364 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2365 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2366 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2367 */
2368 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2369 uint64_t u64GuestCr4 = pCtx->cr4;
2370 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2371 ? pCtx->cr4
2372 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2373 Assert(!RT_HI_U32(u64GuestCr4));
2374
2375#ifndef IN_NEM_DARWIN
2376 /*
2377 * Setup VT-x's view of the guest CR4.
2378 *
2379 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2380 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2381 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2382 *
2383 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2384 */
2385 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2386 {
2387 Assert(pVM->hm.s.vmx.pRealModeTSS);
2388 Assert(PDMVmmDevHeapIsEnabled(pVM));
2389 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2390 }
2391#endif
2392
2393 if (VM_IS_VMX_NESTED_PAGING(pVM))
2394 {
2395 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2396 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2397 {
2398 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2399 u64GuestCr4 |= X86_CR4_PSE;
2400 /* Our identity mapping is a 32-bit page directory. */
2401 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2402 }
2403 /* else: use guest CR4. */
2404 }
2405 else
2406 {
2407 Assert(!pVmxTransient->fIsNestedGuest);
2408
2409 /*
2410 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2411 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2412 */
2413 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2414 {
2415 case PGMMODE_REAL: /* Real-mode. */
2416 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2417 case PGMMODE_32_BIT: /* 32-bit paging. */
2418 {
2419 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2420 break;
2421 }
2422
2423 case PGMMODE_PAE: /* PAE paging. */
2424 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2425 {
2426 u64GuestCr4 |= X86_CR4_PAE;
2427 break;
2428 }
2429
2430 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2431 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2432 {
2433#ifdef VBOX_WITH_64_BITS_GUESTS
2434 /* For our assumption in vmxHCShouldSwapEferMsr. */
2435 Assert(u64GuestCr4 & X86_CR4_PAE);
2436 break;
2437#endif
2438 }
2439 default:
2440 AssertFailed();
2441 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2442 }
2443 }
2444
2445 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2446 u64GuestCr4 |= fSetCr4;
2447 u64GuestCr4 &= fZapCr4;
2448
2449 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2450 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2451 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2452
2453#ifndef IN_NEM_DARWIN
2454 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2455 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2456 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2457 {
2458 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2459 hmR0VmxUpdateStartVmFunction(pVCpu);
2460 }
2461#endif
2462
2463 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2464
2465 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2466 }
2467 return rc;
2468}
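
/*
 * Illustrative sketch (not part of the build): composing an EPT pointer from the
 * host-physical address of the EPT PML4 table, mirroring the EPTP setup above. The helper
 * name is hypothetical; the bit layout (bits 2:0 memory type, bits 5:3 page-walk length
 * minus one) follows the Intel SDM.
 */
#if 0
static uint64_t exampleMakeEptp(RTHCPHYS HCPhysPml4)
{
    Assert(!(HCPhysPml4 & 0xfff));      /* The PML4 table must be 4K aligned. */
    uint64_t uEptp = HCPhysPml4;
    uEptp |= 6;                         /* Bits 2:0: memory type = write-back (WB). */
    uEptp |= 3 << 3;                    /* Bits 5:3: page-walk length - 1 = 3 (4-level tables). */
    return uEptp;                       /* Bit 6 (A/D enable) left clear, as above. */
}
#endif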
2469
2470
2471#ifdef VBOX_STRICT
2472/**
2473 * Strict function to validate segment registers.
2474 *
2475 * @param pVCpu The cross context virtual CPU structure.
2476 * @param pVmcsInfo The VMCS info. object.
2477 *
2478 * @remarks Will import guest CR0 on strict builds during validation of
2479 * segments.
2480 */
2481static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2482{
2483 /*
2484 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2485 *
2486 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2487 * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2488 * unusable bit and doesn't change the guest-context value.
2489 */
2490 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2491 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2492 vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2493 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2494 && ( !CPUMIsGuestInRealModeEx(pCtx)
2495 && !CPUMIsGuestInV86ModeEx(pCtx)))
2496 {
2497 /* Protected mode checks */
2498 /* CS */
2499 Assert(pCtx->cs.Attr.n.u1Present);
2500 Assert(!(pCtx->cs.Attr.u & 0xf00));
2501 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2502 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2503 || !(pCtx->cs.Attr.n.u1Granularity));
2504 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2505 || (pCtx->cs.Attr.n.u1Granularity));
2506 /* CS cannot be loaded with NULL in protected mode. */
2507 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2508 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2509 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2510 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2511 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2512 else
2513 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2514 /* SS */
2515 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2516 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2517 if ( !(pCtx->cr0 & X86_CR0_PE)
2518 || pCtx->cs.Attr.n.u4Type == 3)
2519 {
2520 Assert(!pCtx->ss.Attr.n.u2Dpl);
2521 }
2522 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2523 {
2524 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2525 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2526 Assert(pCtx->ss.Attr.n.u1Present);
2527 Assert(!(pCtx->ss.Attr.u & 0xf00));
2528 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2529 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2530 || !(pCtx->ss.Attr.n.u1Granularity));
2531 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2532 || (pCtx->ss.Attr.n.u1Granularity));
2533 }
2534 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2535 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2536 {
2537 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2538 Assert(pCtx->ds.Attr.n.u1Present);
2539 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2540 Assert(!(pCtx->ds.Attr.u & 0xf00));
2541 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2542 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2543 || !(pCtx->ds.Attr.n.u1Granularity));
2544 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2545 || (pCtx->ds.Attr.n.u1Granularity));
2546 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2547 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2548 }
2549 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2550 {
2551 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2552 Assert(pCtx->es.Attr.n.u1Present);
2553 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2554 Assert(!(pCtx->es.Attr.u & 0xf00));
2555 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2556 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2557 || !(pCtx->es.Attr.n.u1Granularity));
2558 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2559 || (pCtx->es.Attr.n.u1Granularity));
2560 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2561 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2562 }
2563 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2564 {
2565 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2566 Assert(pCtx->fs.Attr.n.u1Present);
2567 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2568 Assert(!(pCtx->fs.Attr.u & 0xf00));
2569 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2570 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2571 || !(pCtx->fs.Attr.n.u1Granularity));
2572 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2573 || (pCtx->fs.Attr.n.u1Granularity));
2574 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2575 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2576 }
2577 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2578 {
2579 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2580 Assert(pCtx->gs.Attr.n.u1Present);
2581 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2582 Assert(!(pCtx->gs.Attr.u & 0xf00));
2583 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2584 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2585 || !(pCtx->gs.Attr.n.u1Granularity));
2586 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2587 || (pCtx->gs.Attr.n.u1Granularity));
2588 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2589 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2590 }
2591 /* 64-bit capable CPUs. */
2592 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2593 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2594 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2595 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2596 }
2597 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2598 || ( CPUMIsGuestInRealModeEx(pCtx)
2599 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2600 {
2601 /* Real and v86 mode checks. */
2602 /* vmxHCExportGuestSegReg() writes the modified attributes into the VMCS. We want to check what we're actually feeding to VT-x. */
2603 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2604#ifndef IN_NEM_DARWIN
2605 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2606 {
2607 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2608 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2609 }
2610 else
2611#endif
2612 {
2613 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2614 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2615 }
2616
2617 /* CS */
2618 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#RX64 sel=%#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2619 Assert(pCtx->cs.u32Limit == 0xffff);
2620 Assert(u32CSAttr == 0xf3);
2621 /* SS */
2622 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2623 Assert(pCtx->ss.u32Limit == 0xffff);
2624 Assert(u32SSAttr == 0xf3);
2625 /* DS */
2626 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2627 Assert(pCtx->ds.u32Limit == 0xffff);
2628 Assert(u32DSAttr == 0xf3);
2629 /* ES */
2630 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2631 Assert(pCtx->es.u32Limit == 0xffff);
2632 Assert(u32ESAttr == 0xf3);
2633 /* FS */
2634 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2635 Assert(pCtx->fs.u32Limit == 0xffff);
2636 Assert(u32FSAttr == 0xf3);
2637 /* GS */
2638 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2639 Assert(pCtx->gs.u32Limit == 0xffff);
2640 Assert(u32GSAttr == 0xf3);
2641 /* 64-bit capable CPUs. */
2642 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2643 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2644 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2645 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2646 }
2647}
2648#endif /* VBOX_STRICT */
2649
2650
2651/**
2652 * Exports a guest segment register into the guest-state area in the VMCS.
2653 *
2654 * @returns VBox status code.
2655 * @param pVCpu The cross context virtual CPU structure.
2656 * @param pVmcsInfo The VMCS info. object.
2657 * @param iSegReg The segment register number (X86_SREG_XXX).
2658 * @param pSelReg Pointer to the segment selector.
2659 *
2660 * @remarks No-long-jump zone!!!
2661 */
2662static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2663{
2664 Assert(iSegReg < X86_SREG_COUNT);
2665
2666 uint32_t u32Access = pSelReg->Attr.u;
2667#ifndef IN_NEM_DARWIN
2668 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2669#endif
2670 {
2671 /*
2672 * The way to differentiate whether this is really a null selector or just a selector
2673 * loaded with 0 in real-mode is by using the segment attributes. A selector loaded in
2674 * real-mode with the value 0 is valid and usable in protected-mode, and we should
2675 * -not- mark it as an unusable segment. Both the recompiler & VT-x ensure that NULL
2676 * selectors loaded in protected-mode have their attributes set to 0.
2677 */
2678 if (u32Access)
2679 { }
2680 else
2681 u32Access = X86DESCATTR_UNUSABLE;
2682 }
2683#ifndef IN_NEM_DARWIN
2684 else
2685 {
2686 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2687 u32Access = 0xf3;
2688 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2689 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2690 RT_NOREF_PV(pVCpu);
2691 }
2692#else
2693 RT_NOREF(pVmcsInfo);
2694#endif
2695
2696 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2697 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2698 ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2699
2700 /*
2701 * Commit it to the VMCS.
2702 */
2703 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
2704 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
2705 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
2706 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
2707 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2708 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2709 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2710 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2711 return VINF_SUCCESS;
2712}
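
/*
 * Illustrative sketch (not part of the build): deriving the access-rights value written to
 * the VMCS from a guest selector's attributes, as done above for the non-real-on-v86 case.
 * The helper name is hypothetical; X86DESCATTR_UNUSABLE is the VT-x "unusable" bit (bit 16
 * of the access-rights field).
 */
#if 0
static uint32_t exampleSegAttrToVmcsAccessRights(uint32_t fAttr)
{
    /* A selector whose attributes are 0 is a NULL selector in protected mode and must be
       marked unusable; anything else is passed through as-is. */
    return fAttr ? fAttr : X86DESCATTR_UNUSABLE;
}
#endif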
2713
2714
2715/**
2716 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2717 * area in the VMCS.
2718 *
2719 * @returns VBox status code.
2720 * @param pVCpu The cross context virtual CPU structure.
2721 * @param pVmxTransient The VMX-transient structure.
2722 *
2723 * @remarks Will import guest CR0 on strict builds during validation of
2724 * segments.
2725 * @remarks No-long-jump zone!!!
2726 */
2727static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2728{
2729 int rc = VERR_INTERNAL_ERROR_5;
2730#ifndef IN_NEM_DARWIN
2731 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2732#endif
2733 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2734 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2735#ifndef IN_NEM_DARWIN
2736 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2737#endif
2738
2739 /*
2740 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2741 */
2742 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2743 {
2744 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2745 {
2746 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2747#ifndef IN_NEM_DARWIN
2748 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2749 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2750#endif
2751 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2752 AssertRC(rc);
2753 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2754 }
2755
2756 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2757 {
2758 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2759#ifndef IN_NEM_DARWIN
2760 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2761 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2762#endif
2763 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2764 AssertRC(rc);
2765 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2766 }
2767
2768 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2769 {
2770 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2771#ifndef IN_NEM_DARWIN
2772 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2773 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2774#endif
2775 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2776 AssertRC(rc);
2777 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2778 }
2779
2780 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2781 {
2782 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2783#ifndef IN_NEM_DARWIN
2784 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2785 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2786#endif
2787 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2788 AssertRC(rc);
2789 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2790 }
2791
2792 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2793 {
2794 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2795#ifndef IN_NEM_DARWIN
2796 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2797 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2798#endif
2799 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2800 AssertRC(rc);
2801 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2802 }
2803
2804 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2805 {
2806 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2807#ifndef IN_NEM_DARWIN
2808 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2809 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2810#endif
2811 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2812 AssertRC(rc);
2813 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2814 }
2815
2816#ifdef VBOX_STRICT
2817 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2818#endif
2819 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2820 pCtx->cs.Attr.u));
2821 }
2822
2823 /*
2824 * Guest TR.
2825 */
2826 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2827 {
2828 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2829
2830 /*
2831 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2832 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2833 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2834 */
2835 uint16_t u16Sel;
2836 uint32_t u32Limit;
2837 uint64_t u64Base;
2838 uint32_t u32AccessRights;
2839#ifndef IN_NEM_DARWIN
2840 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2841#endif
2842 {
2843 u16Sel = pCtx->tr.Sel;
2844 u32Limit = pCtx->tr.u32Limit;
2845 u64Base = pCtx->tr.u64Base;
2846 u32AccessRights = pCtx->tr.Attr.u;
2847 }
2848#ifndef IN_NEM_DARWIN
2849 else
2850 {
2851 Assert(!pVmxTransient->fIsNestedGuest);
2852 Assert(pVM->hm.s.vmx.pRealModeTSS);
2853 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2854
2855 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2856 RTGCPHYS GCPhys;
2857 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2858 AssertRCReturn(rc, rc);
2859
2860 X86DESCATTR DescAttr;
2861 DescAttr.u = 0;
2862 DescAttr.n.u1Present = 1;
2863 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2864
2865 u16Sel = 0;
2866 u32Limit = HM_VTX_TSS_SIZE;
2867 u64Base = GCPhys;
2868 u32AccessRights = DescAttr.u;
2869 }
2870#endif
2871
2872 /* Validate. */
2873 Assert(!(u16Sel & RT_BIT(2)));
2874 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2875 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2876 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2877 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2878 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2879 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2880 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2881 Assert( (u32Limit & 0xfff) == 0xfff
2882 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2883 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2884 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2885
2886 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2887 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2888 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2889 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2890
2891 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2892 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2893 }
2894
2895 /*
2896 * Guest GDTR.
2897 */
2898 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2899 {
2900 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2901
2902 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2903 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2904
2905 /* Validate. */
2906 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2907
2908 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2909 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2910 }
2911
2912 /*
2913 * Guest LDTR.
2914 */
2915 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2916 {
2917 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2918
2919 /* The unusable bit is specific to VT-x; if it's a null selector, mark it as an unusable segment. */
2920 uint32_t u32Access;
2921 if ( !pVmxTransient->fIsNestedGuest
2922 && !pCtx->ldtr.Attr.u)
2923 u32Access = X86DESCATTR_UNUSABLE;
2924 else
2925 u32Access = pCtx->ldtr.Attr.u;
2926
2927 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2928 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2929 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2930 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2931
2932 /* Validate. */
2933 if (!(u32Access & X86DESCATTR_UNUSABLE))
2934 {
2935 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2936 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2937 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2938 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2939 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2940 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2941 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2942 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2943 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2944 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2945 }
2946
2947 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2948 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2949 }
2950
2951 /*
2952 * Guest IDTR.
2953 */
2954 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2955 {
2956 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2957
2958 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2959 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2960
2961 /* Validate. */
2962 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2963
2964 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2965 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2966 }
2967
2968 return VINF_SUCCESS;
2969}
2970
2971
2972/**
2973 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2974 * VM-exit interruption info type.
2975 *
2976 * @returns The IEM exception flags.
2977 * @param uVector The event vector.
2978 * @param uVmxEventType The VMX event type.
2979 *
2980 * @remarks This function currently only constructs flags required for
2981 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2982 * and CR2 aspects of an exception are not included).
2983 */
2984static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2985{
2986 uint32_t fIemXcptFlags;
2987 switch (uVmxEventType)
2988 {
2989 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2990 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2991 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2992 break;
2993
2994 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2995 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2996 break;
2997
2998 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2999 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
3000 break;
3001
3002 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
3003 {
3004 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3005 if (uVector == X86_XCPT_BP)
3006 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
3007 else if (uVector == X86_XCPT_OF)
3008 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
3009 else
3010 {
3011 fIemXcptFlags = 0;
3012 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
3013 }
3014 break;
3015 }
3016
3017 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
3018 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3019 break;
3020
3021 default:
3022 fIemXcptFlags = 0;
3023 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
3024 break;
3025 }
3026 return fIemXcptFlags;
3027}
3028
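/*
 * Illustrative sketch (not part of the build): how a caller might translate a
 * software-exception vectoring type into IEM exception flags using the helper
 * above.  The expected result follows directly from the switch above.
 *
 *     uint32_t const fFlags = vmxHCGetIemXcptFlags(X86_XCPT_BP, VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT);
 *     Assert(fFlags == (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR));
 */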
3029
3030/**
3031 * Sets an event as a pending event to be injected into the guest.
3032 *
3033 * @param pVCpu The cross context virtual CPU structure.
3034 * @param u32IntInfo The VM-entry interruption-information field.
3035 * @param cbInstr The VM-entry instruction length in bytes (for
3036 * software interrupts, exceptions and privileged
3037 * software exceptions).
3038 * @param u32ErrCode The VM-entry exception error code.
3039 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3040 * page-fault.
3041 */
3042DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3043 RTGCUINTPTR GCPtrFaultAddress)
3044{
3045 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3046 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3047 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3048 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3049 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3050 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3051}
3052
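/*
 * Illustrative sketch (not part of the build): queueing a \#PF as a pending event
 * by hand, mirroring the pattern used by the vmxHCSetPendingXcpt* helpers below.
 * uErrCode and GCPtrFaultAddress are assumed to come from the caller's VM-exit
 * handling and are placeholders here.
 *
 *     uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_PF)
 *                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
 *                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
 *                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
 *     vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /+ cbInstr +/, uErrCode, GCPtrFaultAddress);
 */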
3053
3054/**
3055 * Sets an external interrupt as pending-for-injection into the VM.
3056 *
3057 * @param pVCpu The cross context virtual CPU structure.
3058 * @param u8Interrupt The external interrupt vector.
3059 */
3060DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3061{
3062 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3063 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3064 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3065 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3066 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3067}
3068
3069
3070/**
3071 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3072 *
3073 * @param pVCpu The cross context virtual CPU structure.
3074 */
3075DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3076{
3077 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3078 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3079 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3080 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3081 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3082}
3083
3084
3085/**
3086 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3087 *
3088 * @param pVCpu The cross context virtual CPU structure.
3089 */
3090DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3091{
3092 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3093 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3094 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3095 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3096 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3097}
3098
3099
3100/**
3101 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3102 *
3103 * @param pVCpu The cross context virtual CPU structure.
3104 */
3105DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3106{
3107 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3108 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3109 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3110 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3111 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3112}
3113
3114
3115/**
3116 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3117 *
3118 * @param pVCpu The cross context virtual CPU structure.
3119 */
3120DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3121{
3122 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3123 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3124 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3125 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3126 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3127}
3128
3129
3130#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3131/**
3132 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3133 *
3134 * @param pVCpu The cross context virtual CPU structure.
3135 * @param u32ErrCode The error code for the general-protection exception.
3136 */
3137DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3138{
3139 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3140 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3141 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3142 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3143 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3144}
3145
3146
3147/**
3148 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3149 *
3150 * @param pVCpu The cross context virtual CPU structure.
3151 * @param u32ErrCode The error code for the stack exception.
3152 */
3153DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3154{
3155 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3156 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3157 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3158 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3159 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3160}
3161#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3162
3163
3164/**
3165 * Fixes up attributes for the specified segment register.
3166 *
3167 * @param pVCpu The cross context virtual CPU structure.
3168 * @param pSelReg The segment register that needs fixing.
3169 * @param pszRegName The register name (for logging and assertions).
3170 */
3171static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3172{
3173 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3174
3175 /*
3176 * If VT-x marks the segment as unusable, most other bits remain undefined:
3177 * - For CS the L, D and G bits have meaning.
3178 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3179 * - For the remaining data segments no bits are defined.
3180 *
3181 * The present bit and the unusable bit have been observed to be set at the
3182 * same time (the selector was supposed to be invalid as we started executing
3183 * a V8086 interrupt in ring-0).
3184 *
3185 * What should be important for the rest of the VBox code, is that the P bit is
3186 * cleared. Some of the other VBox code recognizes the unusable bit, but
3187 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3188 * safe side here, we'll strip off P and other bits we don't care about. If
3189 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3190 *
3191 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3192 */
3193#ifdef VBOX_STRICT
3194 uint32_t const uAttr = pSelReg->Attr.u;
3195#endif
3196
3197 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3198 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3199 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3200
3201#ifdef VBOX_STRICT
3202# ifndef IN_NEM_DARWIN
3203 VMMRZCallRing3Disable(pVCpu);
3204# endif
3205 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3206# ifdef DEBUG_bird
3207 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3208 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3209 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3210# endif
3211# ifndef IN_NEM_DARWIN
3212 VMMRZCallRing3Enable(pVCpu);
3213# endif
3214 NOREF(uAttr);
3215#endif
3216 RT_NOREF2(pVCpu, pszRegName);
3217}
3218
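/*
 * Illustrative sketch (not part of the build): a present-but-unusable segment keeps
 * only the attribute bits the masking above preserves; in particular X86DESCATTR_P
 * is stripped while the unusable and granularity bits survive.
 *
 *     CPUMSELREG Tmp;
 *     RT_ZERO(Tmp);
 *     Tmp.Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_P | X86DESCATTR_G;
 *     vmxHCFixUnusableSegRegAttr(pVCpu, &Tmp, "DS");
 *     Assert(Tmp.Attr.u == (X86DESCATTR_UNUSABLE | X86DESCATTR_G));
 */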
3219
3220/**
3221 * Imports a guest segment register from the current VMCS into the guest-CPU
3222 * context.
3223 *
3224 * @param pVCpu The cross context virtual CPU structure.
3225 * @param iSegReg The segment register number (X86_SREG_XXX).
3226 *
3227 * @remarks Called with interrupts and/or preemption disabled.
3228 */
3229static void vmxHCImportGuestSegReg(PVMCPUCC pVCpu, uint32_t iSegReg)
3230{
3231 Assert(iSegReg < X86_SREG_COUNT);
3232 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
3233 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
3234 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
3235 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
3236
3237 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
3238
3239 uint16_t u16Sel;
3240 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), &u16Sel); AssertRC(rc);
3241 pSelReg->Sel = u16Sel;
3242 pSelReg->ValidSel = u16Sel;
3243
3244 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3245 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), &pSelReg->u64Base); AssertRC(rc);
3246
3247 uint32_t u32Attr;
3248 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), &u32Attr); AssertRC(rc);
3249 pSelReg->Attr.u = u32Attr;
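    /* Note: the packed string passed to the fix-up helper below encodes the six
       register names ("ES", "CS", "SS", "DS", "FS", "GS"), each occupying 3 bytes
       including its NUL terminator, so iSegReg * 3 selects the right name. */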
3250 if (u32Attr & X86DESCATTR_UNUSABLE)
3251 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + iSegReg * 3);
3252
3253 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3254}
3255
3256
3257/**
3258 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3259 *
3260 * @param pVCpu The cross context virtual CPU structure.
3261 *
3262 * @remarks Called with interrupts and/or preemption disabled.
3263 */
3264static void vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3265{
3266 uint16_t u16Sel;
3267 uint64_t u64Base;
3268 uint32_t u32Limit, u32Attr;
3269 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3270 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3271 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3272 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3273
3274 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3275 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3276 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3277 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3278 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3279 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3280 if (u32Attr & X86DESCATTR_UNUSABLE)
3281 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3282}
3283
3284
3285/**
3286 * Imports the guest TR from the current VMCS into the guest-CPU context.
3287 *
3288 * @param pVCpu The cross context virtual CPU structure.
3289 *
3290 * @remarks Called with interrupts and/or preemption disabled.
3291 */
3292static void vmxHCImportGuestTr(PVMCPUCC pVCpu)
3293{
3294 uint16_t u16Sel;
3295 uint64_t u64Base;
3296 uint32_t u32Limit, u32Attr;
3297 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3298 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3299 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3300 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3301
3302 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3303 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3304 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3305 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3306 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3307 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3308 /* TR is the only selector that can never be unusable. */
3309 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3310}
3311
3312
3313/**
3314 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3315 *
3316 * @param pVCpu The cross context virtual CPU structure.
3317 *
3318 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3319 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3320 * instead!!!
3321 */
3322static void vmxHCImportGuestRip(PVMCPUCC pVCpu)
3323{
3324 uint64_t u64Val;
3325 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3326 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
3327 {
3328 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3329 AssertRC(rc);
3330
3331 pCtx->rip = u64Val;
3332 EMHistoryUpdatePC(pVCpu, pCtx->rip, false);
3333 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
3334 }
3335}
3336
3337
3338/**
3339 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3340 *
3341 * @param pVCpu The cross context virtual CPU structure.
3342 * @param pVmcsInfo The VMCS info. object.
3343 *
3344 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3345 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3346 * instead!!!
3347 */
3348static void vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3349{
3350 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3351 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
3352 {
3353 uint64_t u64Val;
3354 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
3355 AssertRC(rc);
3356
3357 pCtx->rflags.u64 = u64Val;
3358#ifndef IN_NEM_DARWIN
3359 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3360 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
3361 {
3362 pCtx->eflags.Bits.u1VM = 0;
3363 pCtx->eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3364 }
3365#else
3366 RT_NOREF(pVmcsInfo);
3367#endif
3368 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3369 }
3370}
3371
3372
3373/**
3374 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3375 * context.
3376 *
3377 * @param pVCpu The cross context virtual CPU structure.
3378 * @param pVmcsInfo The VMCS info. object.
3379 *
3380 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3381 * do not log!
3382 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3383 * instead!!!
3384 */
3385static void vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3386{
3387 uint32_t u32Val;
3388 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3389 if (!u32Val)
3390 {
3391 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3392 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3393 CPUMSetGuestNmiBlocking(pVCpu, false);
3394 }
3395 else
3396 {
3397 /*
3398 * We must import RIP here to set our EM interrupt-inhibited state.
3399 * We also import RFLAGS as our code that evaluates pending interrupts
3400 * before VM-entry requires it.
3401 */
3402 vmxHCImportGuestRip(pVCpu);
3403 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3404
3405 if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3406 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
3407 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3408 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3409
3410 bool const fNmiBlocking = RT_BOOL(u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
3411 CPUMSetGuestNmiBlocking(pVCpu, fNmiBlocking);
3412 }
3413}
3414
3415
3416/**
3417 * Worker for VMXR0ImportStateOnDemand.
3418 *
3419 * @returns VBox status code.
3420 * @param pVCpu The cross context virtual CPU structure.
3421 * @param pVmcsInfo The VMCS info. object.
3422 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3423 */
3424static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3425{
3426 int rc = VINF_SUCCESS;
3427 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3428 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3429 uint32_t u32Val;
3430
3431 /*
3432 * Note! This is a hack to work around a mysterious BSOD observed with release builds
3433 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3434 * neither are other host platforms.
3435 *
3436 * Committing this temporarily as it prevents BSOD.
3437 *
3438 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3439 */
3440# ifdef RT_OS_WINDOWS
3441 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3442 return VERR_HM_IPE_1;
3443# endif
3444
3445 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3446
3447#ifndef IN_NEM_DARWIN
3448 /*
3449 * We disable interrupts to make the updating of the state and in particular
3450 * the fExtrn modification atomic with respect to preemption hooks.
3451 */
3452 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3453#endif
3454
3455 fWhat &= pCtx->fExtrn;
3456 if (fWhat)
3457 {
3458 do
3459 {
3460 if (fWhat & CPUMCTX_EXTRN_RIP)
3461 vmxHCImportGuestRip(pVCpu);
3462
3463 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3464 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3465
3466 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3467 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3468
3469 if (fWhat & CPUMCTX_EXTRN_RSP)
3470 {
3471 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3472 AssertRC(rc);
3473 }
3474
3475 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3476 {
3477 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3478#ifndef IN_NEM_DARWIN
3479 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3480#else
3481 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3482#endif
3483 if (fWhat & CPUMCTX_EXTRN_CS)
3484 {
3485 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
3486 vmxHCImportGuestRip(pVCpu);
3487 if (fRealOnV86Active)
3488 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3489 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3490 }
3491 if (fWhat & CPUMCTX_EXTRN_SS)
3492 {
3493 vmxHCImportGuestSegReg(pVCpu, X86_SREG_SS);
3494 if (fRealOnV86Active)
3495 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3496 }
3497 if (fWhat & CPUMCTX_EXTRN_DS)
3498 {
3499 vmxHCImportGuestSegReg(pVCpu, X86_SREG_DS);
3500 if (fRealOnV86Active)
3501 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3502 }
3503 if (fWhat & CPUMCTX_EXTRN_ES)
3504 {
3505 vmxHCImportGuestSegReg(pVCpu, X86_SREG_ES);
3506 if (fRealOnV86Active)
3507 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3508 }
3509 if (fWhat & CPUMCTX_EXTRN_FS)
3510 {
3511 vmxHCImportGuestSegReg(pVCpu, X86_SREG_FS);
3512 if (fRealOnV86Active)
3513 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3514 }
3515 if (fWhat & CPUMCTX_EXTRN_GS)
3516 {
3517 vmxHCImportGuestSegReg(pVCpu, X86_SREG_GS);
3518 if (fRealOnV86Active)
3519 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3520 }
3521 }
3522
3523 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3524 {
3525 if (fWhat & CPUMCTX_EXTRN_LDTR)
3526 vmxHCImportGuestLdtr(pVCpu);
3527
3528 if (fWhat & CPUMCTX_EXTRN_GDTR)
3529 {
3530 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3531 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3532 pCtx->gdtr.cbGdt = u32Val;
3533 }
3534
3535 /* Guest IDTR. */
3536 if (fWhat & CPUMCTX_EXTRN_IDTR)
3537 {
3538 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3539 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3540 pCtx->idtr.cbIdt = u32Val;
3541 }
3542
3543 /* Guest TR. */
3544 if (fWhat & CPUMCTX_EXTRN_TR)
3545 {
3546#ifndef IN_NEM_DARWIN
3547 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3548 so we don't need to import that one. */
3549 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3550#endif
3551 vmxHCImportGuestTr(pVCpu);
3552 }
3553 }
3554
3555 if (fWhat & CPUMCTX_EXTRN_DR7)
3556 {
3557#ifndef IN_NEM_DARWIN
3558 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3559#endif
3560 {
3561 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3562 AssertRC(rc);
3563 }
3564 }
3565
3566 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3567 {
3568 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3569 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3570 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3571 pCtx->SysEnter.cs = u32Val;
3572 }
3573
3574#ifndef IN_NEM_DARWIN
3575 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3576 {
3577 if ( pVM->hmr0.s.fAllow64BitGuests
3578 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3579 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3580 }
3581
3582 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3583 {
3584 if ( pVM->hmr0.s.fAllow64BitGuests
3585 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3586 {
3587 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3588 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3589 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3590 }
3591 }
3592
3593 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3594 {
3595 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3596 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3597 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3598 Assert(pMsrs);
3599 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3600 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3601 for (uint32_t i = 0; i < cMsrs; i++)
3602 {
3603 uint32_t const idMsr = pMsrs[i].u32Msr;
3604 switch (idMsr)
3605 {
3606 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3607 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3608 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3609 default:
3610 {
3611 uint32_t idxLbrMsr;
3612 if (VM_IS_VMX_LBR(pVM))
3613 {
3614 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3615 {
3616 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3617 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3618 break;
3619 }
3620 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3621 {
3622 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3623 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3624 break;
3625 }
3626 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3627 {
3628 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3629 break;
3630 }
3631 /* Fallthru (no break) */
3632 }
3633 pCtx->fExtrn = 0;
3634 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3635 ASMSetFlags(fEFlags);
3636 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3637 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3638 }
3639 }
3640 }
3641 }
3642#endif
3643
3644 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3645 {
3646 if (fWhat & CPUMCTX_EXTRN_CR0)
3647 {
3648 uint64_t u64Cr0;
3649 uint64_t u64Shadow;
3650 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3651 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3652#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3653 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3654 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3655#else
3656 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3657 {
3658 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3659 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3660 }
3661 else
3662 {
3663 /*
3664 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3665 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3666 * re-construct CR0. See @bugref{9180#c95} for details.
3667 */
3668 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3669 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3670 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3671 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3672 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3673 }
3674#endif
3675#ifndef IN_NEM_DARWIN
3676 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3677#endif
3678 CPUMSetGuestCR0(pVCpu, u64Cr0);
3679#ifndef IN_NEM_DARWIN
3680 VMMRZCallRing3Enable(pVCpu);
3681#endif
3682 }
3683
3684 if (fWhat & CPUMCTX_EXTRN_CR4)
3685 {
3686 uint64_t u64Cr4;
3687 uint64_t u64Shadow;
3688 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3689 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3690#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3691 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3692 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3693#else
3694 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3695 {
3696 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3697 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3698 }
3699 else
3700 {
3701 /*
3702 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3703 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3704 * re-construct CR4. See @bugref{9180#c95} for details.
3705 */
3706 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3707 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3708 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3709 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3710 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3711 }
3712#endif
3713 pCtx->cr4 = u64Cr4;
3714 }
3715
3716 if (fWhat & CPUMCTX_EXTRN_CR3)
3717 {
3718 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3719 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3720 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3721 && CPUMIsGuestPagingEnabledEx(pCtx)))
3722 {
3723 uint64_t u64Cr3;
3724 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3725 if (pCtx->cr3 != u64Cr3)
3726 {
3727 pCtx->cr3 = u64Cr3;
3728 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3729 }
3730
3731 /*
3732 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3733 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3734 */
3735 if (CPUMIsGuestInPAEModeEx(pCtx))
3736 {
3737 X86PDPE aPaePdpes[4];
3738 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3739 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3740 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3741 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3742 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3743 {
3744 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3745 /* PGM now updates PAE PDPTEs while updating CR3. */
3746 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3747 }
3748 }
3749 }
3750 }
3751 }
3752
3753#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3754 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3755 {
3756 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3757 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3758 {
3759 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3760 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3761 if (RT_SUCCESS(rc))
3762 { /* likely */ }
3763 else
3764 break;
3765 }
3766 }
3767#endif
3768 } while (0);
3769
3770 if (RT_SUCCESS(rc))
3771 {
3772 /* Update fExtrn. */
3773 pCtx->fExtrn &= ~fWhat;
3774
3775 /* If everything has been imported, clear the HM keeper bit. */
3776 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3777 {
3778#ifndef IN_NEM_DARWIN
3779 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3780#else
3781 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3782#endif
3783 Assert(!pCtx->fExtrn);
3784 }
3785 }
3786 }
3787#ifndef IN_NEM_DARWIN
3788 else
3789 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3790
3791 /*
3792 * Restore interrupts.
3793 */
3794 ASMSetFlags(fEFlags);
3795#endif
3796
3797 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3798
3799 if (RT_SUCCESS(rc))
3800 { /* likely */ }
3801 else
3802 return rc;
3803
3804 /*
3805 * Honor any pending CR3 updates.
3806 *
3807 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3808 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3809 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3810 *
3811 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3812 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3813 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3814 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3815 *
3816 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3817 *
3818 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3819 */
3820 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3821#ifndef IN_NEM_DARWIN
3822 && VMMRZCallRing3IsEnabled(pVCpu)
3823#endif
3824 )
3825 {
3826 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3827 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3828 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3829 }
3830
3831 return VINF_SUCCESS;
3832}
3833
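/*
 * Illustrative sketch (not part of the build): a VM-exit handler pulling in just
 * the bits of guest state it needs on demand, as done elsewhere in this file
 * (e.g. before evaluating pending interrupts or emulating real-mode injection).
 *
 *     int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
 *     AssertRCReturn(rc, rc);
 *     // pVCpu->cpum.GstCtx.rip and .rflags are now valid and the corresponding fExtrn bits are clear.
 */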
3834
3835/**
3836 * Check per-VM and per-VCPU force flag actions that require us to go back to
3837 * ring-3 for one reason or another.
3838 *
3839 * @returns Strict VBox status code (i.e. informational status codes too)
3840 * @retval VINF_SUCCESS if we don't have any actions that require going back to
3841 * ring-3.
3842 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
3843 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
3844 * interrupts)
3845 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
3846 * all EMTs to be in ring-3.
3847 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
3848 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
3849 * to the EM loop.
3850 *
3851 * @param pVCpu The cross context virtual CPU structure.
3852 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
3853 * @param fStepping Whether we are single-stepping the guest using the
3854 * hypervisor debugger.
3855 *
3856 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
3857 * is no longer in VMX non-root mode.
3858 */
3859static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
3860{
3861#ifndef IN_NEM_DARWIN
3862 Assert(VMMRZCallRing3IsEnabled(pVCpu));
3863#endif
3864
3865 /*
3866 * Update pending interrupts into the APIC's IRR.
3867 */
3868 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3869 APICUpdatePendingInterrupts(pVCpu);
3870
3871 /*
3872 * Anything pending? Should be more likely than not if we're doing a good job.
3873 */
3874 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3875 if ( !fStepping
3876 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
3877 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
3878 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
3879 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
3880 return VINF_SUCCESS;
3881
3882 /* Pending PGM CR3 sync. */
3883 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
3884 {
3885 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3886 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
3887 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
3888 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
3889 if (rcStrict != VINF_SUCCESS)
3890 {
3891 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
3892 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
3893 return rcStrict;
3894 }
3895 }
3896
3897 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
3898 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
3899 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
3900 {
3901 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
3902 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
3903 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
3904 return rc;
3905 }
3906
3907 /* Pending VM request packets, such as hardware interrupts. */
3908 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
3909 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
3910 {
3911 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
3912 Log4Func(("Pending VM request forcing us back to ring-3\n"));
3913 return VINF_EM_PENDING_REQUEST;
3914 }
3915
3916 /* Pending PGM pool flushes. */
3917 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
3918 {
3919 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
3920 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
3921 return VINF_PGM_POOL_FLUSH_PENDING;
3922 }
3923
3924 /* Pending DMA requests. */
3925 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
3926 {
3927 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
3928 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
3929 return VINF_EM_RAW_TO_R3;
3930 }
3931
3932#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3933 /*
3934 * Pending nested-guest events.
3935 *
3936 * Please note the priority of these events is specified and important.
3937 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
3938 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
3939 */
3940 if (fIsNestedGuest)
3941 {
3942 /* Pending nested-guest APIC-write. */
3943 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
3944 {
3945 Log4Func(("Pending nested-guest APIC-write\n"));
3946 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
3947 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3948 return rcStrict;
3949 }
3950
3951 /* Pending nested-guest monitor-trap flag (MTF). */
3952 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
3953 {
3954 Log4Func(("Pending nested-guest MTF\n"));
3955 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
3956 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3957 return rcStrict;
3958 }
3959
3960 /* Pending nested-guest VMX-preemption timer expired. */
3961 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
3962 {
3963 Log4Func(("Pending nested-guest preempt timer\n"));
3964 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
3965 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3966 return rcStrict;
3967 }
3968 }
3969#else
3970 NOREF(fIsNestedGuest);
3971#endif
3972
3973 return VINF_SUCCESS;
3974}
3975
3976
3977/**
3978 * Converts any TRPM trap into a pending HM event. This is typically used when
3979 * entering from ring-3 (not longjmp returns).
3980 *
3981 * @param pVCpu The cross context virtual CPU structure.
3982 */
3983static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
3984{
3985 Assert(TRPMHasTrap(pVCpu));
3986 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3987
3988 uint8_t uVector;
3989 TRPMEVENT enmTrpmEvent;
3990 uint32_t uErrCode;
3991 RTGCUINTPTR GCPtrFaultAddress;
3992 uint8_t cbInstr;
3993 bool fIcebp;
3994
3995 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
3996 AssertRC(rc);
3997
3998 uint32_t u32IntInfo;
3999 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4000 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4001
4002 rc = TRPMResetTrap(pVCpu);
4003 AssertRC(rc);
4004 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4005 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4006
4007 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4008}
4009
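/*
 * Illustrative sketch (not part of the build): the typical ring-3 re-entry pattern
 * that pairs this helper with vmxHCPendingEventToTrpmTrap() below: convert any
 * queued TRPM trap into a pending HM event before running guest code, and convert
 * back when leaving for ring-3 with an event still pending.
 *
 *     if (TRPMHasTrap(pVCpu))
 *         vmxHCTrpmTrapToPendingEvent(pVCpu);
 *     ...
 *     if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
 *         vmxHCPendingEventToTrpmTrap(pVCpu);
 */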
4010
4011/**
4012 * Converts the pending HM event into a TRPM trap.
4013 *
4014 * @param pVCpu The cross context virtual CPU structure.
4015 */
4016static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4017{
4018 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4019
4020 /* If a trap was already pending, we did something wrong! */
4021 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4022
4023 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4024 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4025 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4026
4027 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4028
4029 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4030 AssertRC(rc);
4031
4032 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4033 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4034
4035 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4036 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4037 else
4038 {
4039 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4040 switch (uVectorType)
4041 {
4042 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4043 TRPMSetTrapDueToIcebp(pVCpu);
4044 RT_FALL_THRU();
4045 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4046 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4047 {
4048 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4049 || ( uVector == X86_XCPT_BP /* INT3 */
4050 || uVector == X86_XCPT_OF /* INTO */
4051 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4052 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4053 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4054 break;
4055 }
4056 }
4057 }
4058
4059 /* We're now done converting the pending event. */
4060 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4061}
4062
4063
4064/**
4065 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4066 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4067 *
4068 * @param pVCpu The cross context virtual CPU structure.
4069 * @param pVmcsInfo The VMCS info. object.
4070 */
4071static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4072{
4073 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4074 {
4075 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4076 {
4077 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4078 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4079 AssertRC(rc);
4080 }
4081 } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4082}
4083
4084
4085/**
4086 * Clears the interrupt-window exiting control in the VMCS.
4087 *
4088 * @param pVCpu The cross context virtual CPU structure.
4089 * @param pVmcsInfo The VMCS info. object.
4090 */
4091DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4092{
4093 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4094 {
4095 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4096 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4097 AssertRC(rc);
4098 }
4099}
4100
4101
4102/**
4103 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4104 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4105 *
4106 * @param pVCpu The cross context virtual CPU structure.
4107 * @param pVmcsInfo The VMCS info. object.
4108 */
4109static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4110{
4111 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4112 {
4113 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4114 {
4115 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4116 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4117 AssertRC(rc);
4118 Log4Func(("Setup NMI-window exiting\n"));
4119 }
4120 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4121}
4122
4123
4124/**
4125 * Clears the NMI-window exiting control in the VMCS.
4126 *
4127 * @param pVCpu The cross context virtual CPU structure.
4128 * @param pVmcsInfo The VMCS info. object.
4129 */
4130DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4131{
4132 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4133 {
4134 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4135 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4136 AssertRC(rc);
4137 }
4138}
4139
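/*
 * Illustrative sketch (not part of the build): the set/clear pattern for the
 * interrupt- and NMI-window exiting controls above.  When an event is pending but
 * cannot be injected yet (e.g. NMIs are blocked), request a window VM-exit; once
 * the event has been delivered, clear the control again.  fBlocked is a placeholder
 * for whatever blocking condition the caller tracks (see the VMCPU_FF_BLOCK_NMIS
 * check in vmxHCEvaluatePendingEvent() below).
 *
 *     if (fBlocked)
 *         vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);    // VM-exit as soon as NMIs are deliverable
 *     else
 *         vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);  // no longer needed
 */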
4140
4141/**
4142 * Injects an event into the guest upon VM-entry by updating the relevant fields
4143 * in the VM-entry area in the VMCS.
4144 *
4145 * @returns Strict VBox status code (i.e. informational status codes too).
4146 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4147 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4148 *
4149 * @param pVCpu The cross context virtual CPU structure.
4150 * @param pVmcsInfo The VMCS info. object.
4151 * @param fIsNestedGuest Flag whether the event is being injected into a nested guest.
4152 * @param pEvent The event being injected.
4153 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This will be updated if necessary and cannot be NULL.
4154 * @param fStepping Whether we're single-stepping guest execution and should
4155 * return VINF_EM_DBG_STEPPED if the event is injected
4156 * directly (registers modified by us, not by hardware on
4157 * VM-entry).
4158 */
4159static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent, bool fStepping,
4160 uint32_t *pfIntrState)
4161{
4162 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4163 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4164 Assert(pfIntrState);
4165
4166#ifdef IN_NEM_DARWIN
4167 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4168#endif
4169
4170 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4171 uint32_t u32IntInfo = pEvent->u64IntInfo;
4172 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4173 uint32_t const cbInstr = pEvent->cbInstr;
4174 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4175 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4176 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4177
4178#ifdef VBOX_STRICT
4179 /*
4180 * Validate the error-code-valid bit for hardware exceptions.
4181 * No error codes for exceptions in real-mode.
4182 *
4183 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4184 */
4185 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4186 && !CPUMIsGuestInRealModeEx(pCtx))
4187 {
4188 switch (uVector)
4189 {
4190 case X86_XCPT_PF:
4191 case X86_XCPT_DF:
4192 case X86_XCPT_TS:
4193 case X86_XCPT_NP:
4194 case X86_XCPT_SS:
4195 case X86_XCPT_GP:
4196 case X86_XCPT_AC:
4197 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4198 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4199 RT_FALL_THRU();
4200 default:
4201 break;
4202 }
4203 }
4204
4205 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4206 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4207 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4208#endif
4209
4210 RT_NOREF(uVector);
4211 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4212 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4213 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4214 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4215 {
4216 Assert(uVector <= X86_XCPT_LAST);
4217 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4218 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4219 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4220 }
4221 else
4222 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4223
4224 /*
4225 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4226 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4227 * interrupt handler in the (real-mode) guest.
4228 *
4229 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4230 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4231 */
4232 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4233 {
4234#ifndef IN_NEM_DARWIN
4235 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4236#endif
4237 {
4238 /*
4239 * For CPUs with unrestricted guest execution enabled and with the guest
4240 * in real-mode, we must not set the deliver-error-code bit.
4241 *
4242 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4243 */
4244 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4245 }
4246#ifndef IN_NEM_DARWIN
4247 else
4248 {
4249 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4250 Assert(PDMVmmDevHeapIsEnabled(pVM));
4251 Assert(pVM->hm.s.vmx.pRealModeTSS);
4252 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4253
4254 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4255 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4256 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4257 AssertRCReturn(rc2, rc2);
4258
4259 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4260 size_t const cbIdtEntry = sizeof(X86IDTR16);
4261 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4262 {
4263 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4264 if (uVector == X86_XCPT_DF)
4265 return VINF_EM_RESET;
4266
4267 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4268 No error codes for exceptions in real-mode. */
4269 if (uVector == X86_XCPT_GP)
4270 {
4271 uint32_t const uXcptDfInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4272 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4273 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4274 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4275 HMEVENT EventXcptDf;
4276 RT_ZERO(EventXcptDf);
4277 EventXcptDf.u64IntInfo = uXcptDfInfo;
4278 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptDf, fStepping, pfIntrState);
4279 }
4280
4281 /*
4282 * If we're injecting an event with no valid IDT entry, inject a #GP.
4283 * No error codes for exceptions in real-mode.
4284 *
4285 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4286 */
4287 uint32_t const uXcptGpInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4288 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4289 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4290 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4291 HMEVENT EventXcptGp;
4292 RT_ZERO(EventXcptGp);
4293 EventXcptGp.u64IntInfo = uXcptGpInfo;
4294 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptGp, fStepping, pfIntrState);
4295 }
4296
4297 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4298 uint16_t uGuestIp = pCtx->ip;
4299 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4300 {
4301 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4302 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
4303 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4304 }
4305 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4306 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4307
4308 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4309 X86IDTR16 IdtEntry;
4310 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4311 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4312 AssertRCReturn(rc2, rc2);
4313
4314 /* Construct the stack frame for the interrupt/exception handler. */
4315 VBOXSTRICTRC rcStrict;
4316 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
4317 if (rcStrict == VINF_SUCCESS)
4318 {
4319 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4320 if (rcStrict == VINF_SUCCESS)
4321 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4322 }
4323
4324 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4325 if (rcStrict == VINF_SUCCESS)
4326 {
4327 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4328 pCtx->rip = IdtEntry.offSel;
4329 pCtx->cs.Sel = IdtEntry.uSel;
4330 pCtx->cs.ValidSel = IdtEntry.uSel;
4331 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4332 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4333 && uVector == X86_XCPT_PF)
4334 pCtx->cr2 = GCPtrFault;
4335
4336 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4337 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4338 | HM_CHANGED_GUEST_RSP);
4339
4340 /*
4341 * If we delivered a hardware exception (other than an NMI) and if there was
4342 * block-by-STI in effect, we should clear it.
4343 */
4344 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4345 {
4346 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4347 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4348 Log4Func(("Clearing inhibition due to STI\n"));
4349 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4350 }
4351
4352 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4353 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4354
4355 /*
4356 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4357 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4358 */
4359 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4360
4361 /*
4362 * If we eventually support nested-guest execution without unrestricted guest execution,
4363 * we should set fInterceptEvents here.
4364 */
4365 Assert(!fIsNestedGuest);
4366
4367 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4368 if (fStepping)
4369 rcStrict = VINF_EM_DBG_STEPPED;
4370 }
4371 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4372 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4373 return rcStrict;
4374 }
4375#else
4376 RT_NOREF(pVmcsInfo);
4377#endif
4378 }
4379
4380 /*
4381 * Validate.
4382 */
4383 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4384 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4385
4386 /*
4387 * Inject the event into the VMCS.
4388 */
4389 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4390 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4391 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4392 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4393 AssertRC(rc);
4394
4395 /*
4396 * Update guest CR2 if this is a page-fault.
4397 */
4398 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4399 pCtx->cr2 = GCPtrFault;
4400
4401 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4402 return VINF_SUCCESS;
4403}
4404
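/*
 * Illustrative sketch (not part of the build): injecting a \#UD directly, mirroring
 * the HMEVENT construction used for the \#DF/\#GP fall-backs inside the function
 * above.  The interruptibility state is assumed to have been obtained via
 * vmxHCGetGuestIntrStateAndUpdateFFs().
 *
 *     uint32_t fIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
 *     HMEVENT  Event;
 *     RT_ZERO(Event);
 *     Event.u64IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_UD)
 *                      | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
 *                      | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
 *                      | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
 *     VBOXSTRICTRC rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, false /+ fIsNestedGuest +/,
 *                                                  &Event, false /+ fStepping +/, &fIntrState);
 */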
4405
4406/**
4407 * Evaluates the event to be delivered to the guest and sets it as the pending
4408 * event.
4409 *
4410 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4411 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4412 * NOT restore these force-flags.
4413 *
4414 * @returns Strict VBox status code (i.e. informational status codes too).
4415 * @param pVCpu The cross context virtual CPU structure.
4416 * @param pVmcsInfo The VMCS information structure.
4417 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
4418 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4419 */
4420static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4421{
4422 Assert(pfIntrState);
4423 Assert(!TRPMHasTrap(pVCpu));
4424
4425 /*
4426 * Compute/update guest-interruptibility state related FFs.
4427 * The FFs will be used below while evaluating events to be injected.
4428 */
4429 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4430
4431 /*
4432 * Evaluate if a new event needs to be injected.
4433 * An event that's already pending has already performed all necessary checks.
4434 */
4435 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4436 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
4437 {
4438 /** @todo SMI. SMIs take priority over NMIs. */
4439
4440 /*
4441 * NMIs.
4442 * NMIs take priority over external interrupts.
4443 */
4444#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4445 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4446#endif
4447 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4448 {
4449 /*
4450 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4451 *
4452 * For a nested-guest, the FF always indicates the outer guest's ability to
4453 * receive an NMI while the guest-interruptibility state bit depends on whether
4454 * the nested-hypervisor is using virtual-NMIs.
4455 */
4456 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
4457 {
4458#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4459 if ( fIsNestedGuest
4460 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4461 return IEMExecVmxVmexitXcptNmi(pVCpu);
4462#endif
4463 vmxHCSetPendingXcptNmi(pVCpu);
4464 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4465 Log4Func(("NMI pending injection\n"));
4466
4467 /* We've injected the NMI, bail. */
4468 return VINF_SUCCESS;
4469 }
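                /* NMI delivery is currently blocked. For the outer guest, arm NMI-window exiting
                   below so we get a VM-exit as soon as the blocking ends; the nested-guest case is
                   picked up later when merging VMCS controls. */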
4470 else if (!fIsNestedGuest)
4471 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4472 }
4473
4474 /*
4475 * External interrupts (PIC/APIC).
4476 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4477 * We cannot re-request the interrupt from the controller again.
4478 */
4479 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4480 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4481 {
4482 Assert(!DBGFIsStepping(pVCpu));
4483 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4484 AssertRC(rc);
4485
4486 /*
4487 * We must not check EFLAGS directly when executing a nested-guest, use
4488 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
4489 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4490 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4491 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4492 *
4493 * See Intel spec. 25.4.1 "Event Blocking".
4494 */
4495 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4496 {
4497#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4498 if ( fIsNestedGuest
4499 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4500 {
4501 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4502 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4503 return rcStrict;
4504 }
4505#endif
4506 uint8_t u8Interrupt;
4507 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4508 if (RT_SUCCESS(rc))
4509 {
4510#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4511 if ( fIsNestedGuest
4512 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4513 {
4514 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
4515 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4516 return rcStrict;
4517 }
4518#endif
4519 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4520 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
4521 }
4522 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4523 {
4524 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
4525
4526 if ( !fIsNestedGuest
4527 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
4528 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
4529 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
4530
4531 /*
4532 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4533 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4534 * need to re-set this force-flag here.
4535 */
4536 }
4537 else
4538 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4539
4540 /* We've injected the interrupt or taken necessary action, bail. */
4541 return VINF_SUCCESS;
4542 }
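            /* Physical interrupts cannot be delivered right now (e.g. EFLAGS.IF is clear). For the
               outer guest, request interrupt-window exiting so we re-evaluate pending interrupts
               as soon as the guest can accept them. */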
4543 if (!fIsNestedGuest)
4544 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4545 }
4546 }
4547 else if (!fIsNestedGuest)
4548 {
4549 /*
4550 * An event is being injected or we are in an interrupt shadow. Check if another event is
4551 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
4552 * the pending event.
4553 */
4554 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4555 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4556 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4557 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4558 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4559 }
4560 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
4561
4562 return VINF_SUCCESS;
4563}
4564
4565
4566/**
4567 * Injects any pending events into the guest if the guest is in a state to
4568 * receive them.
4569 *
4570 * @returns Strict VBox status code (i.e. informational status codes too).
4571 * @param pVCpu The cross context virtual CPU structure.
 * @param   pVmcsInfo       The VMCS information structure.
4572 * @param   fIsNestedGuest  Flag whether the event injection happens for a nested guest.
4573 * @param fIntrState The VT-x guest-interruptibility state.
4574 * @param fStepping Whether we are single-stepping the guest using the
4575 * hypervisor debugger and should return
4576 * VINF_EM_DBG_STEPPED if the event was dispatched
4577 * directly.
4578 */
4579static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t fIntrState, bool fStepping)
4580{
4581 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
4582#ifndef IN_NEM_DARWIN
4583 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4584#endif
4585
4586#ifdef VBOX_STRICT
4587 /*
4588 * Verify guest-interruptibility state.
4589 *
4590 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
4591 * since injecting an event may modify the interruptibility state and we must thus always
4592 * use fIntrState.
4593 */
4594 {
4595 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
4596 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
4597 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
4598 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
4599 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
4600 Assert(!TRPMHasTrap(pVCpu));
4601 NOREF(fBlockMovSS); NOREF(fBlockSti);
4602 }
4603#endif
4604
4605 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4606 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
4607 {
4608 /*
4609 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
4610 * pending even while injecting an event and in this case, we want a VM-exit as soon as
4611 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
4612 *
4613 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
4614 */
4615 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
4616#ifdef VBOX_STRICT
4617 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4618 {
4619 Assert(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
4620 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4621 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4622 }
4623 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
4624 {
4625 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
4626 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4627 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4628 }
4629#endif
4630 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
4631 uIntType));
4632
4633 /*
4634 * Inject the event and get any changes to the guest-interruptibility state.
4635 *
4636 * The guest-interruptibility state may need to be updated if we inject the event
4637 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
4638 */
4639 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
4640 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
4641
4642 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4643 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
4644 else
4645 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
4646 }
4647
4648 /*
4649 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
4650 * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
4651 */
4652 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
4653 && !fIsNestedGuest)
4654 {
4655 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
4656
4657 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4658 {
4659 /*
4660 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
4661 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
4662 */
4663 Assert(!DBGFIsStepping(pVCpu));
4664 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_TF);
4665 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
4666 AssertRC(rc);
4667 }
4668 else
4669 {
4670 /*
4671 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
4672 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
4673 * we take care of this case in vmxHCExportSharedDebugState and also the case if
4674 * we use MTF, so just make sure it's called before executing guest-code.
4675 */
4676 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
4677 }
4678 }
4679 /* else: for nested-guest currently handling while merging controls. */
4680
4681 /*
4682 * Finally, update the guest-interruptibility state.
4683 *
4684 * This is required for the real-on-v86 software interrupt injection, for
4685 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
4686 */
4687 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
4688 AssertRC(rc);
4689
4690 /*
4691 * There's no need to clear the VM-entry interruption-information field here if we're not
4692 * injecting anything. VT-x clears the valid bit on every VM-exit.
4693 *
4694 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
4695 */
4696
4697 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
4698 return rcStrict;
4699}
4700
4701
4702/**
4703 * Tries to determine what part of the guest-state VT-x has deemed as invalid
4704 * and update error record fields accordingly.
4705 *
4706 * @returns VMX_IGS_* error codes.
4707 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
4708 * wrong with the guest state.
4709 *
4710 * @param pVCpu The cross context virtual CPU structure.
4711 * @param pVmcsInfo The VMCS info. object.
4712 *
4713 * @remarks This function assumes our cache of the VMCS controls
4714 * are valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
4715 */
4716static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
4717{
4718#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
4719#define HMVMX_CHECK_BREAK(expr, err) do { \
4720 if (!(expr)) { uError = (err); break; } \
4721 } while (0)
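    /* The checks below loosely mirror the guest-state area checks performed by the CPU on VM-entry
       (see the Intel spec); the first failing check breaks out of the loop with a VMX_IGS_* code. */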
4722
4723 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4724 uint32_t uError = VMX_IGS_ERROR;
4725 uint32_t u32IntrState = 0;
4726#ifndef IN_NEM_DARWIN
4727 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4728 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
4729#else
4730 bool const fUnrestrictedGuest = true;
4731#endif
4732 do
4733 {
4734 int rc;
4735
4736 /*
4737 * Guest-interruptibility state.
4738 *
4739 * Read this first so that any check that fails prior to those that actually
4740 * require the guest-interruptibility state would still reflect the correct
4741 * VMCS value and avoids causing further confusion.
4742 */
4743 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
4744 AssertRC(rc);
4745
4746 uint32_t u32Val;
4747 uint64_t u64Val;
4748
4749 /*
4750 * CR0.
4751 */
4752 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4753 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
4754 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
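        /* fSetCr0 = CR0 bits the CPU requires to be 1; fZapCr0 = mask of CR0 bits allowed to be 1
           (any bit outside fZapCr0 must be 0). */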
4755 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
4756 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
4757 if (fUnrestrictedGuest)
4758 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
4759
4760 uint64_t u64GuestCr0;
4761 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
4762 AssertRC(rc);
4763 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
4764 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
4765 if ( !fUnrestrictedGuest
4766 && (u64GuestCr0 & X86_CR0_PG)
4767 && !(u64GuestCr0 & X86_CR0_PE))
4768 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
4769
4770 /*
4771 * CR4.
4772 */
4773 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4774 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
4775 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
4776
4777 uint64_t u64GuestCr4;
4778 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
4779 AssertRC(rc);
4780 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
4781 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
4782
4783 /*
4784 * IA32_DEBUGCTL MSR.
4785 */
4786 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
4787 AssertRC(rc);
4788 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4789 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
4790 {
4791 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
4792 }
4793 uint64_t u64DebugCtlMsr = u64Val;
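        /* Remember DEBUGCTL for the pending debug exception (BS) vs. DEBUGCTL.BTF consistency
           checks further below. */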
4794
4795#ifdef VBOX_STRICT
4796 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
4797 AssertRC(rc);
4798 Assert(u32Val == pVmcsInfo->u32EntryCtls);
4799#endif
4800 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4801
4802 /*
4803 * RIP and RFLAGS.
4804 */
4805 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
4806 AssertRC(rc);
4807 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
4808 if ( !fLongModeGuest
4809 || !pCtx->cs.Attr.n.u1Long)
4810 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
4811 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
4812 * must be identical if the "IA-32e mode guest" VM-entry
4813 * control is 1 and CS.L is 1. No check applies if the
4814 * CPU supports 64 linear-address bits. */
4815
4816 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
4817 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
4818 AssertRC(rc);
4819 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
4820 VMX_IGS_RFLAGS_RESERVED);
4821 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
4822 uint32_t const u32Eflags = u64Val;
4823
4824 if ( fLongModeGuest
4825 || ( fUnrestrictedGuest
4826 && !(u64GuestCr0 & X86_CR0_PE)))
4827 {
4828 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
4829 }
4830
4831 uint32_t u32EntryInfo;
4832 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
4833 AssertRC(rc);
4834 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
4835 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
4836
4837 /*
4838 * 64-bit checks.
4839 */
4840 if (fLongModeGuest)
4841 {
4842 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
4843 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
4844 }
4845
4846 if ( !fLongModeGuest
4847 && (u64GuestCr4 & X86_CR4_PCIDE))
4848 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
4849
4850 /** @todo CR3 field must be such that bits 63:52 and bits in the range
4851 * 51:32 beyond the processor's physical-address width are 0. */
4852
4853 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4854 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
4855 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
4856
4857#ifndef IN_NEM_DARWIN
4858 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
4859 AssertRC(rc);
4860 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
4861
4862 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
4863 AssertRC(rc);
4864 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
4865#endif
4866
4867 /*
4868 * PERF_GLOBAL MSR.
4869 */
4870 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
4871 {
4872 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
4873 AssertRC(rc);
4874 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
4875 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
4876 }
4877
4878 /*
4879 * PAT MSR.
4880 */
4881 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
4882 {
4883 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
4884 AssertRC(rc);
4885 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
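            /* Each of the eight PAT entries must hold one of the architecturally valid memory types. */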
4886 for (unsigned i = 0; i < 8; i++)
4887 {
4888 uint8_t u8Val = (u64Val & 0xff);
4889 if ( u8Val != 0 /* UC */
4890 && u8Val != 1 /* WC */
4891 && u8Val != 4 /* WT */
4892 && u8Val != 5 /* WP */
4893 && u8Val != 6 /* WB */
4894 && u8Val != 7 /* UC- */)
4895 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
4896 u64Val >>= 8;
4897 }
4898 }
4899
4900 /*
4901 * EFER MSR.
4902 */
4903 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
4904 {
4905 Assert(g_fHmVmxSupportsVmcsEfer);
4906 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
4907 AssertRC(rc);
4908 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
4909 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
4910 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
4911 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
4912 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
4913 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
4914 * iemVmxVmentryCheckGuestState(). */
4915 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4916 || !(u64GuestCr0 & X86_CR0_PG)
4917 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
4918 VMX_IGS_EFER_LMA_LME_MISMATCH);
4919 }
4920
4921 /*
4922 * Segment registers.
4923 */
4924 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
4925 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
4926 if (!(u32Eflags & X86_EFL_VM))
4927 {
4928 /* CS */
4929 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
4930 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
4931 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
4932 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
4933 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4934 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
4935 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4936 /* CS cannot be loaded with NULL in protected mode. */
4937 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
4938 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
4939 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
4940 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
4941 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
4942 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
4943 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
4944 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
4945 else
4946 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
4947
4948 /* SS */
4949 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4950 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
4951 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
4952 if ( !(pCtx->cr0 & X86_CR0_PE)
4953 || pCtx->cs.Attr.n.u4Type == 3)
4954 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
4955
4956 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
4957 {
4958 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
4959 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
4960 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
4961 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
4962 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4963 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4964 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
4965 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4966 }
4967
4968 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
4969 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4970 {
4971 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
4972 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
4973 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4974 || pCtx->ds.Attr.n.u4Type > 11
4975 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4976 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
4977 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
4978 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4979 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4980 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
4981 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4982 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4983 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
4984 }
4985 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4986 {
4987 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
4988 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
4989 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4990 || pCtx->es.Attr.n.u4Type > 11
4991 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4992 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
4993 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
4994 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
4995 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
4996 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
4997 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
4998 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4999 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5000 }
5001 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5002 {
5003 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5004 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5005 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5006 || pCtx->fs.Attr.n.u4Type > 11
5007 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5008 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5009 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5010 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5011 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5012 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5013 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5014 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5015 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5016 }
5017 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5018 {
5019 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5020 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5021 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5022 || pCtx->gs.Attr.n.u4Type > 11
5023 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5024 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5025 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5026 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5027 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5028 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5029 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5030 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5031 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5032 }
5033 /* 64-bit capable CPUs. */
5034 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5035 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5036 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5037 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5038 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5039 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5040 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5041 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5042 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5043 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5044 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5045 }
5046 else
5047 {
5048 /* V86 mode checks. */
5049 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
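            /* When the real-on-v86 workaround is active, the segment attributes in the VMCS are
               forced to 0xf3 (ring-3, accessed data/code), so validate against those rather than
               the pCtx values. */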
5050 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5051 {
5052 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5053 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5054 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5055 }
5056 else
5057 {
5058 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5059 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5060 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5061 }
5062
5063 /* CS */
5064 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5065 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5066 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5067 /* SS */
5068 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5069 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5070 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5071 /* DS */
5072 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5073 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5074 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5075 /* ES */
5076 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5077 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5078 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5079 /* FS */
5080 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5081 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5082 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5083 /* GS */
5084 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5085 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5086 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5087 /* 64-bit capable CPUs. */
5088 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5089 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5090 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5091 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5092 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5093 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5094 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5095 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5096 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5097 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5098 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5099 }
5100
5101 /*
5102 * TR.
5103 */
5104 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5105 /* 64-bit capable CPUs. */
5106 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5107 if (fLongModeGuest)
5108 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5109 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5110 else
5111 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5112 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5113 VMX_IGS_TR_ATTR_TYPE_INVALID);
5114 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5115 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5116 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5117 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5118 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5119 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5120 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5121 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5122
5123 /*
5124 * GDTR and IDTR (64-bit capable checks).
5125 */
5126 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5127 AssertRC(rc);
5128 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5129
5130 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5131 AssertRC(rc);
5132 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5133
5134 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5135 AssertRC(rc);
5136 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5137
5138 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5139 AssertRC(rc);
5140 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5141
5142 /*
5143 * Guest Non-Register State.
5144 */
5145 /* Activity State. */
5146 uint32_t u32ActivityState;
5147 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5148 AssertRC(rc);
5149 HMVMX_CHECK_BREAK( !u32ActivityState
5150 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5151 VMX_IGS_ACTIVITY_STATE_INVALID);
5152 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5153 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5154
5155 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5156 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5157 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5158
5159 /** @todo Activity state and injecting interrupts. Left as a todo since we
5160 * currently don't use any activity state other than ACTIVE. */
5161
5162 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5163 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5164
5165 /* Guest interruptibility-state. */
5166 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5167 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5168 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5169 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5170 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5171 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5172 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5173 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5174 {
5175 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5176 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5177 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5178 }
5179 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5180 {
5181 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5182 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5183 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5184 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5185 }
5186 /** @todo Assumes the processor is not in SMM. */
5187 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5188 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5189 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5190 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5191 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5192 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5193 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5194 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5195
5196 /* Pending debug exceptions. */
5197 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5198 AssertRC(rc);
5199 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5200 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5201 u32Val = u64Val; /* For the pending debug exception checks below. */
5202
5203 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5204 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5205 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5206 {
5207 if ( (u32Eflags & X86_EFL_TF)
5208 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5209 {
5210 /* Bit 14 is PendingDebug.BS. */
5211 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5212 }
5213 if ( !(u32Eflags & X86_EFL_TF)
5214 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5215 {
5216 /* Bit 14 is PendingDebug.BS. */
5217 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5218 }
5219 }
5220
5221#ifndef IN_NEM_DARWIN
5222 /* VMCS link pointer. */
5223 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5224 AssertRC(rc);
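        /* A link pointer of ~0 means no shadow VMCS is in use; otherwise it must point at our
           shadow VMCS and carry the expected VMCS revision identifier. */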
5225 if (u64Val != UINT64_C(0xffffffffffffffff))
5226 {
5227 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5228 /** @todo Bits beyond the processor's physical-address width MBZ. */
5229 /** @todo SMM checks. */
5230 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5231 Assert(pVmcsInfo->pvShadowVmcs);
5232 VMXVMCSREVID VmcsRevId;
5233 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5234 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5235 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5236 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5237 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5238 }
5239
5240 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5241 * not using nested paging? */
5242 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5243 && !fLongModeGuest
5244 && CPUMIsGuestInPAEModeEx(pCtx))
5245 {
5246 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5247 AssertRC(rc);
5248 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5249
5250 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5251 AssertRC(rc);
5252 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5253
5254 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5255 AssertRC(rc);
5256 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5257
5258 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5259 AssertRC(rc);
5260 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5261 }
5262#endif
5263
5264 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5265 if (uError == VMX_IGS_ERROR)
5266 uError = VMX_IGS_REASON_NOT_FOUND;
5267 } while (0);
5268
5269 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5270 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5271 return uError;
5272
5273#undef HMVMX_ERROR_BREAK
5274#undef HMVMX_CHECK_BREAK
5275}
5276/** @} */
5277
5278
5279#ifndef HMVMX_USE_FUNCTION_TABLE
5280/**
5281 * Handles a guest VM-exit from hardware-assisted VMX execution.
5282 *
5283 * @returns Strict VBox status code (i.e. informational status codes too).
5284 * @param pVCpu The cross context virtual CPU structure.
5285 * @param pVmxTransient The VMX-transient structure.
5286 */
5287DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5288{
5289#ifdef DEBUG_ramshankar
5290# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5291 do { \
5292 if (a_fSave != 0) \
5293 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); \
5294 VBOXSTRICTRC rcStrict = a_CallExpr; \
5295 if (a_fSave != 0) \
5296 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5297 return rcStrict; \
5298 } while (0)
5299#else
5300# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5301#endif
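    /* Note: the DEBUG_ramshankar variant of VMEXIT_CALL_RET above imports the complete guest state
       before each handler and marks it all as changed afterwards, trading performance for easier
       isolation of state-import bugs. */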
5302 uint32_t const uExitReason = pVmxTransient->uExitReason;
5303 switch (uExitReason)
5304 {
5305 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5306 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5307 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5308 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5309 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5310 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5311 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5312 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5313 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5314 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5315 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5316 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5317 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5318 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5319 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5320 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5321 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5322 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5323 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5324 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5325 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5326 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5327 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5328 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5329 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5330 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5331 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5332 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5333 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5334 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5335#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5336 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5337 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5338 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5339 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5340 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5341            case VMX_EXIT_VMRESUME:             VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5342            case VMX_EXIT_VMWRITE:              VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5343 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5344 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5345 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5346#else
5347 case VMX_EXIT_VMCLEAR:
5348 case VMX_EXIT_VMLAUNCH:
5349 case VMX_EXIT_VMPTRLD:
5350 case VMX_EXIT_VMPTRST:
5351 case VMX_EXIT_VMREAD:
5352 case VMX_EXIT_VMRESUME:
5353 case VMX_EXIT_VMWRITE:
5354 case VMX_EXIT_VMXOFF:
5355 case VMX_EXIT_VMXON:
5356 case VMX_EXIT_INVVPID:
5357 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5358#endif
5359#if defined(VBOX_WITH_NESTED_HWVIRT_VMX) && defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT)
5360 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5361#else
5362 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5363#endif
5364
5365 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5366 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5367 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5368
5369 case VMX_EXIT_INIT_SIGNAL:
5370 case VMX_EXIT_SIPI:
5371 case VMX_EXIT_IO_SMI:
5372 case VMX_EXIT_SMI:
5373 case VMX_EXIT_ERR_MSR_LOAD:
5374 case VMX_EXIT_ERR_MACHINE_CHECK:
5375 case VMX_EXIT_PML_FULL:
5376 case VMX_EXIT_VIRTUALIZED_EOI:
5377 case VMX_EXIT_GDTR_IDTR_ACCESS:
5378 case VMX_EXIT_LDTR_TR_ACCESS:
5379 case VMX_EXIT_APIC_WRITE:
5380 case VMX_EXIT_RDRAND:
5381 case VMX_EXIT_RSM:
5382 case VMX_EXIT_VMFUNC:
5383 case VMX_EXIT_ENCLS:
5384 case VMX_EXIT_RDSEED:
5385 case VMX_EXIT_XSAVES:
5386 case VMX_EXIT_XRSTORS:
5387 case VMX_EXIT_UMWAIT:
5388 case VMX_EXIT_TPAUSE:
5389 case VMX_EXIT_LOADIWKEY:
5390 default:
5391 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5392 }
5393#undef VMEXIT_CALL_RET
5394}
5395#endif /* !HMVMX_USE_FUNCTION_TABLE */
5396
5397
5398#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5399/**
5400 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5401 *
5402 * @returns Strict VBox status code (i.e. informational status codes too).
5403 * @param pVCpu The cross context virtual CPU structure.
5404 * @param pVmxTransient The VMX-transient structure.
5405 */
5406DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5407{
5408 uint32_t const uExitReason = pVmxTransient->uExitReason;
5409 switch (uExitReason)
5410 {
5411 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
5412 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
5413 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5414 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5415 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5416
5417 /*
5418 * We shouldn't direct host physical interrupts to the nested-guest.
5419 */
5420 case VMX_EXIT_EXT_INT:
5421 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5422
5423 /*
5424 * Instructions that cause VM-exits unconditionally or where the condition is
5425 * always taken solely from the nested hypervisor (meaning if the VM-exit
5426 * happens, it's guaranteed to be a nested-guest VM-exit).
5427 *
5428 * - Provides VM-exit instruction length ONLY.
5429 */
5430 case VMX_EXIT_CPUID: /* Unconditional. */
5431 case VMX_EXIT_VMCALL:
5432 case VMX_EXIT_GETSEC:
5433 case VMX_EXIT_INVD:
5434 case VMX_EXIT_XSETBV:
5435 case VMX_EXIT_VMLAUNCH:
5436 case VMX_EXIT_VMRESUME:
5437 case VMX_EXIT_VMXOFF:
5438 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5439 case VMX_EXIT_VMFUNC:
5440 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5441
5442 /*
5443 * Instructions that cause VM-exits unconditionally or where the condition is
5444 * always taken solely from the nested hypervisor (meaning if the VM-exit
5445 * happens, it's guaranteed to be a nested-guest VM-exit).
5446 *
5447 * - Provides VM-exit instruction length.
5448 * - Provides VM-exit information.
5449 * - Optionally provides Exit qualification.
5450 *
5451 * Since Exit qualification is 0 for all VM-exits where it is not
5452 * applicable, reading and passing it to the guest should produce
5453 * defined behavior.
5454 *
5455 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5456 */
5457 case VMX_EXIT_INVEPT: /* Unconditional. */
5458 case VMX_EXIT_INVVPID:
5459 case VMX_EXIT_VMCLEAR:
5460 case VMX_EXIT_VMPTRLD:
5461 case VMX_EXIT_VMPTRST:
5462 case VMX_EXIT_VMXON:
5463 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5464 case VMX_EXIT_LDTR_TR_ACCESS:
5465 case VMX_EXIT_RDRAND:
5466 case VMX_EXIT_RDSEED:
5467 case VMX_EXIT_XSAVES:
5468 case VMX_EXIT_XRSTORS:
5469 case VMX_EXIT_UMWAIT:
5470 case VMX_EXIT_TPAUSE:
5471 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5472
5473 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5474 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5475 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5476 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5477 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
5478 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
5479 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
5480 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
5481 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
5482 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
5483 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
5484 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
5485 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
5486 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
5487 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
5488 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
5489 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
5490 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
5491 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
5492
5493 case VMX_EXIT_PREEMPT_TIMER:
5494 {
5495 /** @todo NSTVMX: Preempt timer. */
5496 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
5497 }
5498
5499 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
5500 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
5501
5502 case VMX_EXIT_VMREAD:
5503 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
5504
5505 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
5506 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
5507
5508 case VMX_EXIT_INIT_SIGNAL:
5509 case VMX_EXIT_SIPI:
5510 case VMX_EXIT_IO_SMI:
5511 case VMX_EXIT_SMI:
5512 case VMX_EXIT_ERR_MSR_LOAD:
5513 case VMX_EXIT_ERR_MACHINE_CHECK:
5514 case VMX_EXIT_PML_FULL:
5515 case VMX_EXIT_RSM:
5516 default:
5517 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5518 }
5519}
5520#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5521
5522
5523/** @name VM-exit helpers.
5524 * @{
5525 */
5526/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5527/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
5528/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5529
5530/** Macro for VM-exits called unexpectedly. */
5531#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
5532 do { \
5533 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
5534 return VERR_VMX_UNEXPECTED_EXIT; \
5535 } while (0)
5536
5537#ifdef VBOX_STRICT
5538# ifndef IN_NEM_DARWIN
5539/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
5540# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
5541 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
5542
5543# define HMVMX_ASSERT_PREEMPT_CPUID() \
5544 do { \
5545 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
5546 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
5547 } while (0)
5548
5549# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5550 do { \
5551 AssertPtr((a_pVCpu)); \
5552 AssertPtr((a_pVmxTransient)); \
5553 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
5554 Assert((a_pVmxTransient)->pVmcsInfo); \
5555 Assert(ASMIntAreEnabled()); \
5556 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5557 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
5558 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5559 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5560 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
5561 HMVMX_ASSERT_PREEMPT_CPUID(); \
5562 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5563 } while (0)
5564# else
5565# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
5566# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
5567# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5568 do { \
5569 AssertPtr((a_pVCpu)); \
5570 AssertPtr((a_pVmxTransient)); \
5571 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
5572 Assert((a_pVmxTransient)->pVmcsInfo); \
5573 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5574 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5575 } while (0)
5576# endif
5577
5578# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5579 do { \
5580 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
5581 Assert((a_pVmxTransient)->fIsNestedGuest); \
5582 } while (0)
5583
5584# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5585 do { \
5586 Log4Func(("\n")); \
5587 } while (0)
5588#else
5589# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5590 do { \
5591 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5592 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
5593 } while (0)
5594
5595# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5596 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
5597
5598# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
5599#endif
5600
5601#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5602/** Macro that does the necessary privilege checks and intercepted VM-exits for
5603 * guests that attempted to execute a VMX instruction. */
5604# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
5605 do \
5606 { \
5607 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
5608 if (rcStrictTmp == VINF_SUCCESS) \
5609 { /* likely */ } \
5610 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5611 { \
5612 Assert((a_pVCpu)->hm.s.Event.fPending); \
5613 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
5614 return VINF_SUCCESS; \
5615 } \
5616 else \
5617 { \
5618 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
5619 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
5620 } \
5621 } while (0)
5622
5623/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
5624# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
5625 do \
5626 { \
5627 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
5628 (a_pGCPtrEffAddr)); \
5629 if (rcStrictTmp == VINF_SUCCESS) \
5630 { /* likely */ } \
5631 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5632 { \
5633 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
5634 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
5635 NOREF(uXcptTmp); \
5636 return VINF_SUCCESS; \
5637 } \
5638 else \
5639 { \
5640 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
5641 return rcStrictTmp; \
5642 } \
5643 } while (0)
5644#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5645
5646
5647/**
5648 * Advances the guest RIP by the specified number of bytes.
5649 *
5650 * @param pVCpu The cross context virtual CPU structure.
5651 * @param cbInstr Number of bytes to advance the RIP by.
5652 *
5653 * @remarks No-long-jump zone!!!
5654 */
5655DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
5656{
5657 /* Advance the RIP. */
5658 pVCpu->cpum.GstCtx.rip += cbInstr;
5659 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
5660
5661 /* Update interrupt inhibition. */
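    /* The interrupt shadow only covers the instruction immediately following STI/MOV SS; once RIP
       has moved past the recorded inhibition PC, the force-flag no longer applies and is cleared. */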
5662 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
5663 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
5664 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
5665}
5666
5667
5668/**
5669 * Advances the guest RIP after reading it from the VMCS.
5670 *
5671 * @returns VBox status code, no informational status codes.
5672 * @param pVCpu The cross context virtual CPU structure.
5673 * @param pVmxTransient The VMX-transient structure.
5674 *
5675 * @remarks No-long-jump zone!!!
5676 */
5677static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5678{
5679 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
5680 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
5681 AssertRCReturn(rc, rc);
5682
5683 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
5684 return VINF_SUCCESS;
5685}
5686
5687
5688/**
5689 * Handle a condition that occurred while delivering an event through the guest or
5690 * nested-guest IDT.
5691 *
5692 * @returns Strict VBox status code (i.e. informational status codes too).
5693 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5694 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
5695 * to continue execution of the guest which will deliver the \#DF.
5696 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5697 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
5698 *
5699 * @param pVCpu The cross context virtual CPU structure.
5700 * @param pVmxTransient The VMX-transient structure.
5701 *
5702 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
5703 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
5704 * is due to an EPT violation, PML full or SPP-related event.
5705 *
5706 * @remarks No-long-jump zone!!!
5707 */
5708static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5709{
5710 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
5711 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
5712 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5713 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5714 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5715 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
5716
5717 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5718 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5719 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
5720 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
5721 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
5722 {
5723 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
5724 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
5725
5726 /*
5727 * If the event was a software interrupt (generated with INT n) or a software exception
5728 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
5729 * can handle the VM-exit and continue guest execution which will re-execute the
5730 * instruction rather than re-injecting the exception, as that can cause premature
5731 * trips to ring-3 before injection and involve TRPM which currently has no way of
5732 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
5733 * the problem).
5734 */
5735 IEMXCPTRAISE enmRaise;
5736 IEMXCPTRAISEINFO fRaiseInfo;
5737 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5738 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5739 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
5740 {
5741 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
5742 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5743 }
5744 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
5745 {
5746 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
5747 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
5748 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
5749
5750 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
5751 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
5752
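            /* Ask IEM how the two events combine: re-execute the instruction, re-deliver the previous event, raise a #DF or triple fault, or flag a CPU hang. */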
5753 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
5754
5755 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
5756 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
5757 {
5758 pVmxTransient->fVectoringPF = true;
5759 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5760 }
5761 }
5762 else
5763 {
5764 /*
5765 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
5766 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
5767 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
5768 */
5769 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5770 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5771 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
5772 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5773 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5774 }
5775
5776 /*
5777 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
5778 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
5779 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
5780 * subsequent VM-entry would fail, see @bugref{7445}.
5781 *
5782 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
5783 */
5784 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5785 && enmRaise == IEMXCPTRAISE_PREV_EVENT
5786 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5787 && CPUMIsGuestNmiBlocking(pVCpu))
5788 {
5789 CPUMSetGuestNmiBlocking(pVCpu, false);
5790 }
5791
5792 switch (enmRaise)
5793 {
5794 case IEMXCPTRAISE_CURRENT_XCPT:
5795 {
5796 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
5797 Assert(rcStrict == VINF_SUCCESS);
5798 break;
5799 }
5800
5801 case IEMXCPTRAISE_PREV_EVENT:
5802 {
5803 uint32_t u32ErrCode;
5804 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
5805 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5806 else
5807 u32ErrCode = 0;
5808
5809 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
5810 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
5811 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */,
5812 u32ErrCode, pVCpu->cpum.GstCtx.cr2);
5813
5814 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5815 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
5816 Assert(rcStrict == VINF_SUCCESS);
5817 break;
5818 }
5819
5820 case IEMXCPTRAISE_REEXEC_INSTR:
5821 Assert(rcStrict == VINF_SUCCESS);
5822 break;
5823
5824 case IEMXCPTRAISE_DOUBLE_FAULT:
5825 {
5826 /*
5827 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
5828 * second #PF as a guest #PF (and not a shadow #PF) which needs to be converted into a #DF.
5829 */
5830 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
5831 {
5832 pVmxTransient->fVectoringDoublePF = true;
5833 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5834 pVCpu->cpum.GstCtx.cr2));
5835 rcStrict = VINF_SUCCESS;
5836 }
5837 else
5838 {
5839 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
5840 vmxHCSetPendingXcptDF(pVCpu);
5841 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5842 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5843 rcStrict = VINF_HM_DOUBLE_FAULT;
5844 }
5845 break;
5846 }
5847
5848 case IEMXCPTRAISE_TRIPLE_FAULT:
5849 {
5850 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
5851 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5852 rcStrict = VINF_EM_RESET;
5853 break;
5854 }
5855
5856 case IEMXCPTRAISE_CPU_HANG:
5857 {
5858 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
5859 rcStrict = VERR_EM_GUEST_CPU_HANG;
5860 break;
5861 }
5862
5863 default:
5864 {
5865 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
5866 rcStrict = VERR_VMX_IPE_2;
5867 break;
5868 }
5869 }
5870 }
5871 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5872 && !CPUMIsGuestNmiBlocking(pVCpu))
5873 {
5874 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
5875 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
5876 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
5877 {
5878 /*
5879 * Execution of IRET caused a fault when NMI blocking was in effect (i.e we're in
5880 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5881 * that virtual NMIs remain blocked until the IRET execution is completed.
5882 *
5883 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
5884 */
5885 CPUMSetGuestNmiBlocking(pVCpu, true);
5886 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5887 }
5888 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5889 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5890 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5891 {
5892 /*
5893 * Execution of IRET caused an EPT violation, page-modification log-full event or
5894 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
5895 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5896 * that virtual NMIs remain blocked until the IRET execution is completed.
5897 *
5898 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
5899 */
5900 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
5901 {
5902 CPUMSetGuestNmiBlocking(pVCpu, true);
5903 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5904 }
5905 }
5906 }
5907
5908 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
5909 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
5910 return rcStrict;
5911}
5912
5913
5914#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5915/**
5916 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
5917 * guest attempting to execute a VMX instruction.
5918 *
5919 * @returns Strict VBox status code (i.e. informational status codes too).
5920 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5921 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
5922 *
5923 * @param pVCpu The cross context virtual CPU structure.
5924 * @param uExitReason The VM-exit reason.
5925 *
5926 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
5927 * @remarks No-long-jump zone!!!
5928 */
5929static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
5930{
5931 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
5932 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
5933
5934 /*
5935 * The physical CPU would have already checked the CPU mode/code segment.
5936 * We shall just assert here for paranoia.
5937 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
5938 */
5939 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
5940 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
5941 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
5942
5943 if (uExitReason == VMX_EXIT_VMXON)
5944 {
5945 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5946
5947 /*
5948 * We check CR4.VMXE because it is required to be always set while in VMX operation
5949 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
5950 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
5951 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
5952 */
5953 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
5954 {
5955 Log4Func(("CR4.VMXE is not set -> #UD\n"));
5956 vmxHCSetPendingXcptUD(pVCpu);
5957 return VINF_HM_PENDING_XCPT;
5958 }
5959 }
5960 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
5961 {
5962 /*
5963 * The guest has not entered VMX operation but attempted to execute a VMX instruction
5964 * (other than VMXON), we need to raise a #UD.
5965 */
5966 Log4Func(("Not in VMX root mode -> #UD\n"));
5967 vmxHCSetPendingXcptUD(pVCpu);
5968 return VINF_HM_PENDING_XCPT;
5969 }
5970
5971 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
5972 return VINF_SUCCESS;
5973}
5974
5975
5976/**
5977 * Decodes the memory operand of an instruction that caused a VM-exit.
5978 *
5979 * The Exit qualification field provides the displacement field for memory
5980 * operand instructions, if any.
5981 *
5982 * @returns Strict VBox status code (i.e. informational status codes too).
5983 * @retval VINF_SUCCESS if the operand was successfully decoded.
5984 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
5985 * operand.
5986 * @param pVCpu The cross context virtual CPU structure.
5987 * @param uExitInstrInfo The VM-exit instruction information field.
5988 * @param enmMemAccess The memory operand's access type (read or write).
5989 * @param GCPtrDisp The instruction displacement field, if any. For
5990 * RIP-relative addressing pass RIP + displacement here.
5991 * @param pGCPtrMem Where to store the effective destination memory address.
5992 *
5993 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
5994 * virtual-8086 mode, hence skips those checks while verifying if the
5995 * segment is valid.
5996 */
5997static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
5998 PRTGCPTR pGCPtrMem)
5999{
6000 Assert(pGCPtrMem);
6001 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6002 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6003 | CPUMCTX_EXTRN_CR0);
6004
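    /* Lookup tables indexed by the address-size field of the VM-exit instruction information (0 = 16-bit, 1 = 32-bit, 2 = 64-bit). */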
6005 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6006 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6007 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6008
6009 VMXEXITINSTRINFO ExitInstrInfo;
6010 ExitInstrInfo.u = uExitInstrInfo;
6011 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6012 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6013 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6014 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6015 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6016 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6017 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6018 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6019 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6020
6021 /*
6022 * Validate instruction information.
6023 * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6024 */
6025 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6026 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6027 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6028 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6029 AssertLogRelMsgReturn(fIsMemOperand,
6030 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6031
6032 /*
6033 * Compute the complete effective address.
6034 *
6035 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6036 * See AMD spec. 4.5.2 "Segment Registers".
6037 */
6038 RTGCPTR GCPtrMem = GCPtrDisp;
6039 if (fBaseRegValid)
6040 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6041 if (fIdxRegValid)
6042 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6043
6044 RTGCPTR const GCPtrOff = GCPtrMem;
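    /* Add the segment base: in long mode only the FS and GS bases are applied, in all other modes every segment base is applied. */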
6045 if ( !fIsLongMode
6046 || iSegReg >= X86_SREG_FS)
6047 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6048 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6049
6050 /*
6051 * Validate effective address.
6052 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6053 */
6054 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6055 Assert(cbAccess > 0);
6056 if (fIsLongMode)
6057 {
6058 if (X86_IS_CANONICAL(GCPtrMem))
6059 {
6060 *pGCPtrMem = GCPtrMem;
6061 return VINF_SUCCESS;
6062 }
6063
6064 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6065 * "Data Limit Checks in 64-bit Mode". */
6066 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6067 vmxHCSetPendingXcptGP(pVCpu, 0);
6068 return VINF_HM_PENDING_XCPT;
6069 }
6070
6071 /*
6072 * This is a watered down version of iemMemApplySegment().
6073 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6074 * and segment CPL/DPL checks are skipped.
6075 */
6076 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6077 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6078 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6079
6080 /* Check if the segment is present and usable. */
6081 if ( pSel->Attr.n.u1Present
6082 && !pSel->Attr.n.u1Unusable)
6083 {
6084 Assert(pSel->Attr.n.u1DescType);
6085 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6086 {
6087 /* Check permissions for the data segment. */
6088 if ( enmMemAccess == VMXMEMACCESS_WRITE
6089 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6090 {
6091 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6092 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6093 return VINF_HM_PENDING_XCPT;
6094 }
6095
6096 /* Check limits if it's a normal data segment. */
6097 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6098 {
6099 if ( GCPtrFirst32 > pSel->u32Limit
6100 || GCPtrLast32 > pSel->u32Limit)
6101 {
6102 Log4Func(("Data segment limit exceeded. "
6103 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6104 GCPtrLast32, pSel->u32Limit));
6105 if (iSegReg == X86_SREG_SS)
6106 vmxHCSetPendingXcptSS(pVCpu, 0);
6107 else
6108 vmxHCSetPendingXcptGP(pVCpu, 0);
6109 return VINF_HM_PENDING_XCPT;
6110 }
6111 }
6112 else
6113 {
6114 /* Check limits if it's an expand-down data segment.
6115 Note! The upper boundary is defined by the B bit, not the G bit! */
6116 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6117 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6118 {
6119 Log4Func(("Expand-down data segment limit exceeded. "
6120 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6121 GCPtrLast32, pSel->u32Limit));
6122 if (iSegReg == X86_SREG_SS)
6123 vmxHCSetPendingXcptSS(pVCpu, 0);
6124 else
6125 vmxHCSetPendingXcptGP(pVCpu, 0);
6126 return VINF_HM_PENDING_XCPT;
6127 }
6128 }
6129 }
6130 else
6131 {
6132 /* Check permissions for the code segment. */
6133 if ( enmMemAccess == VMXMEMACCESS_WRITE
6134 || ( enmMemAccess == VMXMEMACCESS_READ
6135 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6136 {
6137 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6138 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6139 vmxHCSetPendingXcptGP(pVCpu, 0);
6140 return VINF_HM_PENDING_XCPT;
6141 }
6142
6143 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6144 if ( GCPtrFirst32 > pSel->u32Limit
6145 || GCPtrLast32 > pSel->u32Limit)
6146 {
6147 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6148 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6149 if (iSegReg == X86_SREG_SS)
6150 vmxHCSetPendingXcptSS(pVCpu, 0);
6151 else
6152 vmxHCSetPendingXcptGP(pVCpu, 0);
6153 return VINF_HM_PENDING_XCPT;
6154 }
6155 }
6156 }
6157 else
6158 {
6159 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6160 vmxHCSetPendingXcptGP(pVCpu, 0);
6161 return VINF_HM_PENDING_XCPT;
6162 }
6163
6164 *pGCPtrMem = GCPtrMem;
6165 return VINF_SUCCESS;
6166}
6167#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6168
6169
6170/**
6171 * VM-exit helper for LMSW.
6172 */
6173static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6174{
6175 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6176 AssertRCReturn(rc, rc);
6177
6178 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6179 AssertMsg( rcStrict == VINF_SUCCESS
6180 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6181
6182 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6183 if (rcStrict == VINF_IEM_RAISED_XCPT)
6184 {
6185 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6186 rcStrict = VINF_SUCCESS;
6187 }
6188
6189 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6190 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6191 return rcStrict;
6192}
6193
6194
6195/**
6196 * VM-exit helper for CLTS.
6197 */
6198static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6199{
6200 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6201 AssertRCReturn(rc, rc);
6202
6203 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6204 AssertMsg( rcStrict == VINF_SUCCESS
6205 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6206
6207 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6208 if (rcStrict == VINF_IEM_RAISED_XCPT)
6209 {
6210 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6211 rcStrict = VINF_SUCCESS;
6212 }
6213
6214 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6215 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6216 return rcStrict;
6217}
6218
6219
6220/**
6221 * VM-exit helper for MOV from CRx (CRx read).
6222 */
6223static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6224{
6225 Assert(iCrReg < 16);
6226 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6227
6228 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6229 AssertRCReturn(rc, rc);
6230
6231 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6232 AssertMsg( rcStrict == VINF_SUCCESS
6233 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6234
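    /* RSP has its own VMCS guest-state field, so it needs to be flagged separately when it is the destination of the MOV. */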
6235 if (iGReg == X86_GREG_xSP)
6236 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6237 else
6238 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6239#ifdef VBOX_WITH_STATISTICS
6240 switch (iCrReg)
6241 {
6242 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6243 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6244 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6245 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6246 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6247 }
6248#endif
6249 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6250 return rcStrict;
6251}
6252
6253
6254/**
6255 * VM-exit helper for MOV to CRx (CRx write).
6256 */
6257static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6258{
6259 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6260
6261 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6262 AssertMsg( rcStrict == VINF_SUCCESS
6263 || rcStrict == VINF_IEM_RAISED_XCPT
6264 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6265
6266 switch (iCrReg)
6267 {
6268 case 0:
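            /* A CR0 write can change the paging mode and thus EFER.LMA and the VM-entry/VM-exit controls, hence the extra dirty flags. */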
6269 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6270 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6271 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6272 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6273 break;
6274
6275 case 2:
6276 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6277 /* Nothing to do here, CR2 is not part of the VMCS. */
6278 break;
6279
6280 case 3:
6281 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6282 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6283 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6284 break;
6285
6286 case 4:
6287 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6288 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6289#ifndef IN_NEM_DARWIN
6290 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6291 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6292#else
6293 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6294#endif
6295 break;
6296
6297 case 8:
6298 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6299 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6300 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6301 break;
6302
6303 default:
6304 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6305 break;
6306 }
6307
6308 if (rcStrict == VINF_IEM_RAISED_XCPT)
6309 {
6310 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6311 rcStrict = VINF_SUCCESS;
6312 }
6313 return rcStrict;
6314}
6315
6316
6317/**
6318 * VM-exit exception handler for \#PF (Page-fault exception).
6319 *
6320 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6321 */
6322static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6323{
6324 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6325 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6326
6327#ifndef IN_NEM_DARWIN
6328 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6329 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6330 { /* likely */ }
6331 else
6332#endif
6333 {
6334#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6335 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6336#endif
6337 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6338 if (!pVmxTransient->fVectoringDoublePF)
6339 {
6340 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6341 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6342 }
6343 else
6344 {
6345 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6346 Assert(!pVmxTransient->fIsNestedGuest);
6347 vmxHCSetPendingXcptDF(pVCpu);
6348 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6349 }
6350 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6351 return VINF_SUCCESS;
6352 }
6353
6354 Assert(!pVmxTransient->fIsNestedGuest);
6355
6356 /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
6357 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6358 if (pVmxTransient->fVectoringPF)
6359 {
6360 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6361 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6362 }
6363
6364 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6365 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6366 AssertRCReturn(rc, rc);
6367
6368 Log4Func(("#PF: cs:rip=%#04x:%#RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pCtx->cs.Sel, pCtx->rip,
6369 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pCtx->cr3));
6370
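    /* Hand the #PF to PGM: it will either resolve it (shadow page table sync, MMIO handling, etc.) or tell us the fault must be reflected to the guest. */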
6371 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6372 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
6373
6374 Log4Func(("#PF: rc=%Rrc\n", rc));
6375 if (rc == VINF_SUCCESS)
6376 {
6377 /*
6378 * This is typically a shadow page table sync or a MMIO instruction. But we may have
6379 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6380 */
6381 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6382 TRPMResetTrap(pVCpu);
6383 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6384 return rc;
6385 }
6386
6387 if (rc == VINF_EM_RAW_GUEST_TRAP)
6388 {
6389 if (!pVmxTransient->fVectoringDoublePF)
6390 {
6391 /* It's a guest page fault and needs to be reflected to the guest. */
6392 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6393 TRPMResetTrap(pVCpu);
6394 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6395 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6396 uGstErrorCode, pVmxTransient->uExitQual);
6397 }
6398 else
6399 {
6400 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6401 TRPMResetTrap(pVCpu);
6402 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6403 vmxHCSetPendingXcptDF(pVCpu);
6404 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6405 }
6406
6407 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6408 return VINF_SUCCESS;
6409 }
6410
6411 TRPMResetTrap(pVCpu);
6412 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6413 return rc;
6414}
6415
6416
6417/**
6418 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6419 *
6420 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6421 */
6422static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6423{
6424 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6425 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6426
6427 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0);
6428 AssertRCReturn(rc, rc);
6429
6430 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6431 {
6432 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
6433 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6434
6435 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6436 * provides the VM-exit instruction length. If this causes problems later,
6437 * disassemble the instruction like it's done on AMD-V. */
6438 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6439 AssertRCReturn(rc2, rc2);
6440 return rc;
6441 }
6442
6443 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6444 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6445 return VINF_SUCCESS;
6446}
6447
6448
6449/**
6450 * VM-exit exception handler for \#BP (Breakpoint exception).
6451 *
6452 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6453 */
6454static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6455{
6456 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6457 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6458
6459 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6460 AssertRCReturn(rc, rc);
6461
6462 VBOXSTRICTRC rcStrict;
6463 if (!pVmxTransient->fIsNestedGuest)
6464 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx));
6465 else
6466 rcStrict = VINF_EM_RAW_GUEST_TRAP;
6467
6468 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6469 {
6470 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6471 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6472 rcStrict = VINF_SUCCESS;
6473 }
6474
6475 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
6476 return rcStrict;
6477}
6478
6479
6480/**
6481 * VM-exit exception handler for \#AC (Alignment-check exception).
6482 *
6483 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6484 */
6485static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6486{
6487 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6488
6489 /*
6490 * Detect #ACs caused by the host having enabled split-lock detection.
6491 * Emulate such instructions.
6492 */
6493 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo,
6494 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);
6495 AssertRCReturn(rc, rc);
6496 /** @todo detect split lock in cpu feature? */
6497 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
6498 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6499 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
6500 || CPUMGetGuestCPL(pVCpu) != 3
6501 /* 3. When EFLAGS.AC is clear this can only be a split-lock case. */
6502 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
6503 {
6504 /*
6505 * Check for debug/trace events and import state accordingly.
6506 */
6507 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
6508 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6509 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
6510#ifndef IN_NEM_DARWIN
6511 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
6512#endif
6513 )
6514 {
6515 if (pVM->cCpus == 1)
6516 {
6517#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6518 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6519#else
6520 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6521#endif
6522 AssertRCReturn(rc, rc);
6523 }
6524 }
6525 else
6526 {
6527 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6528 AssertRCReturn(rc, rc);
6529
6530 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
6531
6532 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
6533 {
6534 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
6535 if (rcStrict != VINF_SUCCESS)
6536 return rcStrict;
6537 }
6538 }
6539
6540 /*
6541 * Emulate the instruction.
6542 *
6543 * We have to ignore the LOCK prefix here as we must not retrigger the
6544 * detection on the host. This isn't all that satisfactory, though...
6545 */
6546 if (pVM->cCpus == 1)
6547 {
6548 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
6549 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6550
6551 /** @todo For SMP configs we should do a rendezvous here. */
6552 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
6553 if (rcStrict == VINF_SUCCESS)
6554#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6555 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6556 HM_CHANGED_GUEST_RIP
6557 | HM_CHANGED_GUEST_RFLAGS
6558 | HM_CHANGED_GUEST_GPRS_MASK
6559 | HM_CHANGED_GUEST_CS
6560 | HM_CHANGED_GUEST_SS);
6561#else
6562 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6563#endif
6564 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6565 {
6566 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6567 rcStrict = VINF_SUCCESS;
6568 }
6569 return rcStrict;
6570 }
6571 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
6572 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6573 return VINF_EM_EMULATE_SPLIT_LOCK;
6574 }
6575
6576 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
6577 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
6578 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
6579
6580 /* Re-inject it. We'll detect any nesting before getting here. */
6581 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6582 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6583 return VINF_SUCCESS;
6584}
6585
6586
6587/**
6588 * VM-exit exception handler for \#DB (Debug exception).
6589 *
6590 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6591 */
6592static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6593{
6594 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6595 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
6596
6597 /*
6598 * Get the DR6-like values from the Exit qualification and pass them to DBGF for processing.
6599 */
6600 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6601
6602 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
6603 uint64_t const uDR6 = X86_DR6_INIT_VAL
6604 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
6605 | X86_DR6_BD | X86_DR6_BS));
6606
6607 int rc;
6608 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6609 if (!pVmxTransient->fIsNestedGuest)
6610 {
6611 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6612
6613 /*
6614 * Prevents stepping twice over the same instruction when the guest is stepping using
6615 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
6616 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
6617 */
6618 if ( rc == VINF_EM_DBG_STEPPED
6619 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
6620 {
6621 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6622 rc = VINF_EM_RAW_GUEST_TRAP;
6623 }
6624 }
6625 else
6626 rc = VINF_EM_RAW_GUEST_TRAP;
6627 Log6Func(("rc=%Rrc\n", rc));
6628 if (rc == VINF_EM_RAW_GUEST_TRAP)
6629 {
6630 /*
6631 * The exception was for the guest. Update DR6, DR7.GD and
6632 * IA32_DEBUGCTL.LBR before forwarding it.
6633 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
6634 */
6635#ifndef IN_NEM_DARWIN
6636 VMMRZCallRing3Disable(pVCpu);
6637 HM_DISABLE_PREEMPT(pVCpu);
6638
6639 pCtx->dr[6] &= ~X86_DR6_B_MASK;
6640 pCtx->dr[6] |= uDR6;
6641 if (CPUMIsGuestDebugStateActive(pVCpu))
6642 ASMSetDR6(pCtx->dr[6]);
6643
6644 HM_RESTORE_PREEMPT();
6645 VMMRZCallRing3Enable(pVCpu);
6646#else
6647 /** @todo */
6648#endif
6649
6650 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7);
6651 AssertRCReturn(rc, rc);
6652
6653 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
6654 pCtx->dr[7] &= ~(uint64_t)X86_DR7_GD;
6655
6656 /* Paranoia. */
6657 pCtx->dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
6658 pCtx->dr[7] |= X86_DR7_RA1_MASK;
6659
6660 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
6661 AssertRC(rc);
6662
6663 /*
6664 * Raise #DB in the guest.
6665 *
6666 * It is important to reflect exactly what the VM-exit gave us (preserving the
6667 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
6668 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
6669 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
6670 *
6671 * Intel re-documented ICEBP/INT1 in May 2018; it was previously documented as part of
6672 * the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
6673 */
6674 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6675 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6676 return VINF_SUCCESS;
6677 }
6678
6679 /*
6680 * Not a guest trap, must be a hypervisor related debug event then.
6681 * Update DR6 in case someone is interested in it.
6682 */
6683 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
6684 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
6685 CPUMSetHyperDR6(pVCpu, uDR6);
6686
6687 return rc;
6688}
6689
6690
6691/**
6692 * Hacks its way around the lovely mesa driver's backdoor accesses.
6693 *
6694 * @sa hmR0SvmHandleMesaDrvGp.
6695 */
6696static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6697{
6698 LogFunc(("cs:rip=%#04x:%#RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
6699 RT_NOREF(pCtx);
6700
6701 /* For now we'll just skip the instruction. */
6702 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6703}
6704
6705
6706/**
6707 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
6708 * backdoor logging w/o checking what it is running inside.
6709 *
6710 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
6711 * backdoor port and magic numbers loaded in registers.
6712 *
6713 * @returns true if it is, false if it isn't.
6714 * @sa hmR0SvmIsMesaDrvGp.
6715 */
6716DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6717{
6718 /* 0xed: IN eAX,dx */
6719 uint8_t abInstr[1];
6720 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
6721 return false;
6722
6723 /* Check that it is #GP(0). */
6724 if (pVmxTransient->uExitIntErrorCode != 0)
6725 return false;
6726
6727 /* Check magic and port. */
6728 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
6729 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
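    /* The mesa backdoor uses the VMware backdoor I/O port (0x5658 = 'VX') in DX with the magic 'VMXh' (0x564d5868) in EAX. */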
6730 if (pCtx->rax != UINT32_C(0x564d5868))
6731 return false;
6732 if (pCtx->dx != UINT32_C(0x5658))
6733 return false;
6734
6735 /* Flat ring-3 CS. */
6736 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
6737 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
6738 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
6739 if (pCtx->cs.Attr.n.u2Dpl != 3)
6740 return false;
6741 if (pCtx->cs.u64Base != 0)
6742 return false;
6743
6744 /* Check opcode. */
6745 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
6746 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
6747 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
6748 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
6749 if (RT_FAILURE(rc))
6750 return false;
6751 if (abInstr[0] != 0xed)
6752 return false;
6753
6754 return true;
6755}
6756
6757
6758/**
6759 * VM-exit exception handler for \#GP (General-protection exception).
6760 *
6761 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6762 */
6763static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6764{
6765 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6766 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
6767
6768 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6769 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6770#ifndef IN_NEM_DARWIN
6771 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
6772 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
6773 { /* likely */ }
6774 else
6775#endif
6776 {
6777#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6778# ifndef IN_NEM_DARWIN
6779 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6780# else
6781 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6782# endif
6783#endif
6784 /*
6785 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
6786 * executing a nested-guest, reflect #GP to the guest or nested-guest.
6787 */
6788 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6789 AssertRCReturn(rc, rc);
6790 Log4Func(("Gst: cs:rip=%#04x:%#RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
6791 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
6792
6793 if ( pVmxTransient->fIsNestedGuest
6794 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
6795 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
6796 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6797 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6798 else
6799 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
6800 return rc;
6801 }
6802
6803#ifndef IN_NEM_DARWIN
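    /* Real-mode guest running in V86 mode without unrestricted guest execution: emulate the #GP-raising instruction with IEM and check whether the guest has left real mode. */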
6804 Assert(CPUMIsGuestInRealModeEx(pCtx));
6805 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
6806 Assert(!pVmxTransient->fIsNestedGuest);
6807
6808 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6809 AssertRCReturn(rc, rc);
6810
6811 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
6812 if (rcStrict == VINF_SUCCESS)
6813 {
6814 if (!CPUMIsGuestInRealModeEx(pCtx))
6815 {
6816 /*
6817 * The guest is no longer in real-mode; check if we can continue executing the
6818 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
6819 */
6820 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
6821 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
6822 {
6823 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
6824 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6825 }
6826 else
6827 {
6828 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
6829 rcStrict = VINF_EM_RESCHEDULE;
6830 }
6831 }
6832 else
6833 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6834 }
6835 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6836 {
6837 rcStrict = VINF_SUCCESS;
6838 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6839 }
6840 return VBOXSTRICTRC_VAL(rcStrict);
6841#endif
6842}
6843
6844
6845/**
6846 * VM-exit exception handler wrapper for all other exceptions that are not handled
6847 * by a specific handler.
6848 *
6849 * This simply re-injects the exception back into the VM without any special
6850 * processing.
6851 *
6852 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6853 */
6854static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6855{
6856 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6857
6858#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6859# ifndef IN_NEM_DARWIN
6860 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6861 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
6862 ("uVector=%#x u32XcptBitmap=%#X32\n",
6863 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
6864 NOREF(pVmcsInfo);
6865# endif
6866#endif
6867
6868 /*
6869 * Re-inject the exception into the guest. This cannot be a double-fault condition which
6870 * would have been handled while checking exits due to event delivery.
6871 */
6872 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6873
6874#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6875 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6876 AssertRCReturn(rc, rc);
6877 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6878#endif
6879
6880#ifdef VBOX_WITH_STATISTICS
6881 switch (uVector)
6882 {
6883 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
6884 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
6885 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
6886 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6887 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
6888 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
6889 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6890 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
6891 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
6892 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
6893 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
6894 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
6895 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
6896 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
6897 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
6898 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
6899 default:
6900 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
6901 break;
6902 }
6903#endif
6904
6905 /* We should never call this function for a page-fault, we'd need to pass on the fault address below otherwise. */
6906 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
6907 NOREF(uVector);
6908
6909 /* Re-inject the original exception into the guest. */
6910 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6911 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6912 return VINF_SUCCESS;
6913}
6914
6915
6916/**
6917 * VM-exit exception handler for all exceptions (except NMIs!).
6918 *
6919 * @remarks This may be called for both guests and nested-guests. Take care to not
6920 * make assumptions and avoid doing anything that is not relevant when
6921 * executing a nested-guest (e.g., Mesa driver hacks).
6922 */
6923static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6924{
6925 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6926
6927 /*
6928 * If this VM-exit occurred while delivering an event through the guest IDT, take
6929 * action based on the return code and additional hints (e.g. for page-faults)
6930 * that will be updated in the VMX transient structure.
6931 */
6932 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
6933 if (rcStrict == VINF_SUCCESS)
6934 {
6935 /*
6936 * If an exception caused a VM-exit due to delivery of an event, the original
6937 * event may have to be re-injected into the guest. We shall reinject it and
6938 * continue guest execution. However, page-fault is a complicated case and
6939 * needs additional processing done in vmxHCExitXcptPF().
6940 */
6941 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
6942 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6943 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
6944 || uVector == X86_XCPT_PF)
6945 {
6946 switch (uVector)
6947 {
6948 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
6949 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
6950 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
6951 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
6952 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
6953 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
6954 default:
6955 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
6956 }
6957 }
6958 /* else: inject pending event before resuming guest execution. */
6959 }
6960 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
6961 {
6962 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6963 rcStrict = VINF_SUCCESS;
6964 }
6965
6966 return rcStrict;
6967}
6968/** @} */
6969
6970
6971/** @name VM-exit handlers.
6972 * @{
6973 */
6974/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6975/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6976/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6977
6978/**
6979 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
6980 */
6981HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6982{
6983 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6984 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
6985
6986#ifndef IN_NEM_DARWIN
6987 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
6988 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
6989 return VINF_SUCCESS;
6990 return VINF_EM_RAW_INTERRUPT;
6991#else
6992 return VINF_SUCCESS;
6993#endif
6994}
6995
6996
6997/**
6998 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
6999 * VM-exit.
7000 */
7001HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7002{
7003 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7004 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7005
7006 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
7007
7008 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7009 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7010 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7011
7012 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7013 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7014 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7015 NOREF(pVmcsInfo);
7016
7017 VBOXSTRICTRC rcStrict;
7018 switch (uExitIntType)
7019 {
7020#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7021 /*
7022 * Host physical NMIs:
7023 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7024 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7025 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7026 *
7027 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7028 * See Intel spec. 27.5.5 "Updating Non-Register State".
7029 */
7030 case VMX_EXIT_INT_INFO_TYPE_NMI:
7031 {
7032 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7033 break;
7034 }
7035#endif
7036
7037 /*
7038 * Privileged software exceptions (#DB from ICEBP),
7039 * Software exceptions (#BP and #OF),
7040 * Hardware exceptions:
7041 * Process the required exceptions and resume guest execution if possible.
7042 */
7043 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7044 Assert(uVector == X86_XCPT_DB);
7045 RT_FALL_THRU();
7046 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7047 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7048 RT_FALL_THRU();
7049 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7050 {
7051 NOREF(uVector);
7052 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
7053 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7054 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
7055 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
7056
7057 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7058 break;
7059 }
7060
7061 default:
7062 {
7063 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7064 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7065 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7066 break;
7067 }
7068 }
7069
7070 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7071 return rcStrict;
7072}
7073
7074
7075/**
7076 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7077 */
7078HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7079{
7080 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7081
7082 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7083 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7084 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7085
7086 /* Evaluate and deliver pending events and resume guest execution. */
7087 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7088 return VINF_SUCCESS;
7089}
7090
7091
7092/**
7093 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7094 */
7095HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7096{
7097 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7098
7099 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7100 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7101 {
7102 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7103 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7104 }
7105
7106 Assert(!CPUMIsGuestNmiBlocking(pVCpu));
7107
7108 /*
7109 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
 7110 * It is therefore safe to clear block-by-STI and deliver the NMI ourselves. See @bugref{7445}.
7111 */
7112 uint32_t fIntrState;
7113 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7114 AssertRC(rc);
7115 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7116 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7117 {
7118 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
7119 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
7120
7121 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7122 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7123 AssertRC(rc);
7124 }
7125
 7126 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7127 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7128
7129 /* Evaluate and deliver pending events and resume guest execution. */
7130 return VINF_SUCCESS;
7131}
7132
7133
7134/**
7135 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7136 */
7137HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7138{
7139 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7140 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7141}
7142
7143
7144/**
7145 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7146 */
7147HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7148{
7149 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7150 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7151}
7152
7153
7154/**
7155 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7156 */
7157HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7158{
7159 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7160
7161 /*
7162 * Get the state we need and update the exit history entry.
7163 */
7164 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7165 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7166
7167 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7168 AssertRCReturn(rc, rc);
7169
7170 VBOXSTRICTRC rcStrict;
7171 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7172 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7173 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
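    /* The exit history is keyed on the flat PC (CS base + RIP); a non-NULL record means the
       history code wants this exit handled via EMHistoryExec (frequent exit or probing, see
       the else branch below) rather than by simply emulating the CPUID here. */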
7174 if (!pExitRec)
7175 {
7176 /*
7177 * Regular CPUID instruction execution.
7178 */
7179 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7180 if (rcStrict == VINF_SUCCESS)
7181 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7182 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7183 {
7184 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7185 rcStrict = VINF_SUCCESS;
7186 }
7187 }
7188 else
7189 {
7190 /*
7191 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7192 */
7193 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7194 AssertRCReturn(rc2, rc2);
7195
7196 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7197 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7198
7199 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7200 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7201
7202 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7203 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7204 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7205 }
7206 return rcStrict;
7207}
7208
7209
7210/**
7211 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7212 */
7213HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7214{
7215 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7216
7217 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7218 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4);
7219 AssertRCReturn(rc, rc);
7220
7221 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7222 return VINF_EM_RAW_EMULATE_INSTR;
7223
7224 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7225 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7226}
7227
7228
7229/**
7230 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7231 */
7232HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7233{
7234 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7235
7236 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7237 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7238 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
7239 AssertRCReturn(rc, rc);
7240
7241 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7242 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7243 {
7244 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7245 we must reset offsetting on VM-entry. See @bugref{6634}. */
7246 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7247 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7248 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7249 }
7250 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7251 {
7252 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7253 rcStrict = VINF_SUCCESS;
7254 }
7255 return rcStrict;
7256}
7257
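/*
 * Illustrative sketch (not compiled): with the "use TSC offsetting" processor control set,
 * the value the guest reads via RDTSC/RDTSCP is the host TSC plus the offset programmed
 * into the VMCS TSC-offset field. The helper below only illustrates that relationship;
 * the name is made up and nothing in this file uses it.
 */
#if 0
# include <iprt/asm-amd64-x86.h>   /* ASMReadTSC */
DECLINLINE(uint64_t) vmxSketchGuestTscFromOffset(uint64_t u64TscOffset)
{
    /* Guest-visible TSC = host TSC + VMCS TSC offset (no TSC scaling assumed). */
    return ASMReadTSC() + u64TscOffset;
}
#endif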
7258
7259/**
7260 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7261 */
7262HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7263{
7264 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7265
7266 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7267 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7268 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
7269 AssertRCReturn(rc, rc);
7270
7271 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7272 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7273 {
7274 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7275 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7276 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7277 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7278 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7279 }
7280 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7281 {
7282 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7283 rcStrict = VINF_SUCCESS;
7284 }
7285 return rcStrict;
7286}
7287
7288
7289/**
7290 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7291 */
7292HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7293{
7294 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7295
7296 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7297 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0
7298 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
7299 AssertRCReturn(rc, rc);
7300
7301 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7302 rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
7303 if (RT_LIKELY(rc == VINF_SUCCESS))
7304 {
7305 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7306 Assert(pVmxTransient->cbExitInstr == 2);
7307 }
7308 else
7309 {
7310 AssertMsgFailed(("vmxHCExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
7311 rc = VERR_EM_INTERPRETER;
7312 }
7313 return rc;
7314}
7315
7316
7317/**
7318 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7319 */
7320HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7321{
7322 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7323
7324 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7325 if (EMAreHypercallInstructionsEnabled(pVCpu))
7326 {
7327 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7328 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
7329 | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
7330 AssertRCReturn(rc, rc);
7331
7332 /* Perform the hypercall. */
7333 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7334 if (rcStrict == VINF_SUCCESS)
7335 {
7336 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7337 AssertRCReturn(rc, rc);
7338 }
7339 else
7340 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7341 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7342 || RT_FAILURE(rcStrict));
7343
7344 /* If the hypercall changes anything other than guest's general-purpose registers,
7345 we would need to reload the guest changed bits here before VM-entry. */
7346 }
7347 else
7348 Log4Func(("Hypercalls not enabled\n"));
7349
7350 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7351 if (RT_FAILURE(rcStrict))
7352 {
7353 vmxHCSetPendingXcptUD(pVCpu);
7354 rcStrict = VINF_SUCCESS;
7355 }
7356
7357 return rcStrict;
7358}
7359
7360
7361/**
7362 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7363 */
7364HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7365{
7366 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7367#ifndef IN_NEM_DARWIN
7368 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7369#endif
7370
7371 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7372 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7373 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7374 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7375 AssertRCReturn(rc, rc);
7376
7377 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7378
7379 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7380 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7381 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7382 {
7383 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7384 rcStrict = VINF_SUCCESS;
7385 }
7386 else
7387 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7388 VBOXSTRICTRC_VAL(rcStrict)));
7389 return rcStrict;
7390}
7391
7392
7393/**
7394 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7395 */
7396HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7397{
7398 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7399
7400 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7401 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7402 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
7403 AssertRCReturn(rc, rc);
7404
7405 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7406 if (rcStrict == VINF_SUCCESS)
7407 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7408 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7409 {
7410 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7411 rcStrict = VINF_SUCCESS;
7412 }
7413
7414 return rcStrict;
7415}
7416
7417
7418/**
7419 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7420 */
7421HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7422{
7423 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7424
7425 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7426 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7427 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7428 AssertRCReturn(rc, rc);
7429
7430 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
7431 if (RT_SUCCESS(rcStrict))
7432 {
7433 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7434 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7435 rcStrict = VINF_SUCCESS;
7436 }
7437
7438 return rcStrict;
7439}
7440
7441
7442/**
7443 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7444 * VM-exit.
7445 */
7446HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7447{
7448 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7449 return VINF_EM_RESET;
7450}
7451
7452
7453/**
7454 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7455 */
7456HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7457{
7458 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7459
7460 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7461 AssertRCReturn(rc, rc);
7462
7463 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
7464 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
7465 rc = VINF_SUCCESS;
7466 else
7467 rc = VINF_EM_HALT;
7468
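    /* VINF_EM_HALT takes us to ring-3 where EM deals with the halted state until the next
       wakeup event; that switch is what the statistics counter below tracks. */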
7469 if (rc != VINF_SUCCESS)
7470 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
7471 return rc;
7472}
7473
7474
7475/**
7476 * VM-exit handler for instructions that result in a \#UD exception delivered to
7477 * the guest.
7478 */
7479HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7480{
7481 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7482 vmxHCSetPendingXcptUD(pVCpu);
7483 return VINF_SUCCESS;
7484}
7485
7486
7487/**
7488 * VM-exit handler for expiry of the VMX-preemption timer.
7489 */
7490HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7491{
7492 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7493
7494 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
7495 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
 7496 Log12(("vmxHCExitPreemptTimer:\n"));
7497
7498 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
7499 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7500 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
7501 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
7502 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
7503}
7504
7505
7506/**
7507 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
7508 */
7509HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7510{
7511 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7512
7513 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7514 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7515 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
7516 AssertRCReturn(rc, rc);
7517
7518 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
7519 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
7520 : HM_CHANGED_RAISED_XCPT_MASK);
7521
7522#ifndef IN_NEM_DARWIN
7523 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7524 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
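    /* The guest may now be using an XCR0 that differs from (or again matches) the host's;
       when the load/save requirement changes, refresh the start-VM function selection so
       XCR0 is only switched around VM-entry/exit when it actually needs to be. */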
7525 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
7526 {
7527 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
7528 hmR0VmxUpdateStartVmFunction(pVCpu);
7529 }
7530#endif
7531
7532 return rcStrict;
7533}
7534
7535
7536/**
7537 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
7538 */
7539HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7540{
7541 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7542
 7543 /** @todo Enable the new code after finding a reliable guest test-case. */
7544#if 1
7545 return VERR_EM_INTERPRETER;
7546#else
7547 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7548 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
7549 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7550 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
7551 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7552 AssertRCReturn(rc, rc);
7553
7554 /* Paranoia. Ensure this has a memory operand. */
7555 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
7556
7557 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
7558 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
7559 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
7560 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
7561
7562 RTGCPTR GCPtrDesc;
7563 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
7564
7565 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
7566 GCPtrDesc, uType);
7567 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7568 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7569 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7570 {
7571 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7572 rcStrict = VINF_SUCCESS;
7573 }
7574 return rcStrict;
7575#endif
7576}
7577
7578
7579/**
7580 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
7581 * VM-exit.
7582 */
7583HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7584{
7585 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7586 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7587 AssertRCReturn(rc, rc);
7588
7589 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
7590 if (RT_FAILURE(rc))
7591 return rc;
7592
7593 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
7594 NOREF(uInvalidReason);
7595
7596#ifdef VBOX_STRICT
7597 uint32_t fIntrState;
7598 uint64_t u64Val;
7599 vmxHCReadEntryIntInfoVmcs(pVCpu, pVmxTransient);
7600 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
7601 vmxHCReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
7602
7603 Log4(("uInvalidReason %u\n", uInvalidReason));
7604 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
7605 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
7606 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
7607
7608 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
7609 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
7610 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
7611 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
7612 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
7613 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
7614 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
 7615 Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
7616 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
7617 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
7618 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
7619 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
7620# ifndef IN_NEM_DARWIN
7621 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
7622 {
7623 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
7624 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
7625 }
7626
7627 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
7628# endif
7629#endif
7630
7631 return VERR_VMX_INVALID_GUEST_STATE;
7632}
7633
7634/**
7635 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
7636 */
7637HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7638{
7639 /*
7640 * Cumulative notes of all recognized but unexpected VM-exits.
7641 *
7642 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
7643 * nested-paging is used.
7644 *
 7645 * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
7646 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
7647 * this function (and thereby stop VM execution) for handling such instructions.
7648 *
7649 *
7650 * VMX_EXIT_INIT_SIGNAL:
7651 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
7652 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these
 7653 * VM-exits. However, we should not see INIT-signal VM-exits while executing a VM.
 7654 *
 7655 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
 7656 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
 7657 * See Intel spec. 23.8 "Restrictions on VMX operation".
7658 *
7659 * VMX_EXIT_SIPI:
7660 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
7661 * activity state is used. We don't make use of it as our guests don't have direct
7662 * access to the host local APIC.
7663 *
7664 * See Intel spec. 25.3 "Other Causes of VM-exits".
7665 *
7666 * VMX_EXIT_IO_SMI:
7667 * VMX_EXIT_SMI:
7668 * This can only happen if we support dual-monitor treatment of SMI, which can be
7669 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
7670 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
7671 * VMX root mode or receive an SMI. If we get here, something funny is going on.
7672 *
7673 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
 7674 * See Intel spec. 25.3 "Other Causes of VM-Exits".
7675 *
7676 * VMX_EXIT_ERR_MSR_LOAD:
 7677 * Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
 7678 * and typically indicate a bug in the hypervisor code. We thus cannot resume
 7679 * execution.
7680 *
7681 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
7682 *
7683 * VMX_EXIT_ERR_MACHINE_CHECK:
 7684 * Machine-check exceptions indicate a fatal/unrecoverable hardware condition
 7685 * including but not limited to system bus, ECC, parity, cache and TLB errors. An
 7686 * abort-class #MC exception is raised. We thus cannot assume a
 7687 * reasonable chance of continuing any sort of execution and we bail.
7688 *
7689 * See Intel spec. 15.1 "Machine-check Architecture".
7690 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
7691 *
7692 * VMX_EXIT_PML_FULL:
7693 * VMX_EXIT_VIRTUALIZED_EOI:
7694 * VMX_EXIT_APIC_WRITE:
7695 * We do not currently support any of these features and thus they are all unexpected
7696 * VM-exits.
7697 *
7698 * VMX_EXIT_GDTR_IDTR_ACCESS:
7699 * VMX_EXIT_LDTR_TR_ACCESS:
7700 * VMX_EXIT_RDRAND:
7701 * VMX_EXIT_RSM:
7702 * VMX_EXIT_VMFUNC:
7703 * VMX_EXIT_ENCLS:
7704 * VMX_EXIT_RDSEED:
7705 * VMX_EXIT_XSAVES:
7706 * VMX_EXIT_XRSTORS:
7707 * VMX_EXIT_UMWAIT:
7708 * VMX_EXIT_TPAUSE:
7709 * VMX_EXIT_LOADIWKEY:
7710 * These VM-exits are -not- caused unconditionally by execution of the corresponding
 7711 * instruction. Any VM-exit for these instructions indicates a hardware problem,
7712 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
7713 *
7714 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
7715 */
7716 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7717 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
7718 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7719}
7720
7721
7722/**
7723 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
7724 */
7725HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7726{
7727 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7728
7729 /** @todo Optimize this: We currently drag in the whole MSR state
7730 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
7731 * MSRs required. That would require changes to IEM and possibly CPUM too.
 7732 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7733 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7734 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7735 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
7736 switch (idMsr)
7737 {
7738 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7739 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7740 }
7741
7742 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7743 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7744 AssertRCReturn(rc, rc);
7745
7746 Log4Func(("ecx=%#RX32\n", idMsr));
7747
7748#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7749 Assert(!pVmxTransient->fIsNestedGuest);
7750 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
7751 {
7752 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
7753 && idMsr != MSR_K6_EFER)
7754 {
7755 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
7756 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7757 }
7758 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7759 {
7760 Assert(pVmcsInfo->pvMsrBitmap);
7761 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
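            /* VMXMSRPM_ALLOW_RD means the MSR bitmap does not intercept reads of this MSR, so
               the CPU should not have produced this RDMSR VM-exit in the first place; hitting
               this path indicates an inconsistent bitmap or lazy-MSR bookkeeping. */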
7762 if (fMsrpm & VMXMSRPM_ALLOW_RD)
7763 {
7764 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
7765 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7766 }
7767 }
7768 }
7769#endif
7770
7771 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
7772 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
7773 if (rcStrict == VINF_SUCCESS)
7774 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7775 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7776 {
7777 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7778 rcStrict = VINF_SUCCESS;
7779 }
7780 else
7781 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
7782 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7783
7784 return rcStrict;
7785}
7786
7787
7788/**
7789 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
7790 */
7791HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7792{
7793 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7794
7795 /** @todo Optimize this: We currently drag in the whole MSR state
7796 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
7797 * MSRs required. That would require changes to IEM and possibly CPUM too.
 7798 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7799 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7800 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
7801
7802 /*
7803 * The FS and GS base MSRs are not part of the above all-MSRs mask.
 7804 * Although we don't need to fetch the base itself (it will be overwritten shortly),
 7805 * when loading the guest state we also load the entire segment register, including the
 7806 * limit and attributes, and thus we need to import them here.
7807 */
7808 switch (idMsr)
7809 {
7810 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7811 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7812 }
7813
7814 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7815 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7816 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7817 AssertRCReturn(rc, rc);
7818
7819 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
7820
7821 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
7822 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
7823
7824 if (rcStrict == VINF_SUCCESS)
7825 {
7826 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7827
7828 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
7829 if ( idMsr == MSR_IA32_APICBASE
7830 || ( idMsr >= MSR_IA32_X2APIC_START
7831 && idMsr <= MSR_IA32_X2APIC_END))
7832 {
7833 /*
7834 * We've already saved the APIC related guest-state (TPR) in post-run phase.
7835 * When full APIC register virtualization is implemented we'll have to make
7836 * sure APIC state is saved from the VMCS before IEM changes it.
7837 */
7838 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7839 }
7840 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
7841 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7842 else if (idMsr == MSR_K6_EFER)
7843 {
7844 /*
7845 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
7846 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
7847 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
7848 */
7849 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
7850 }
7851
7852 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
7853 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
7854 {
7855 switch (idMsr)
7856 {
7857 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
7858 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
7859 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
7860 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
7861 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
7862 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
7863 default:
7864 {
7865#ifndef IN_NEM_DARWIN
7866 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7867 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
7868 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7869 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
7870#else
7871 AssertMsgFailed(("TODO\n"));
7872#endif
7873 break;
7874 }
7875 }
7876 }
7877#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7878 else
7879 {
7880 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
7881 switch (idMsr)
7882 {
7883 case MSR_IA32_SYSENTER_CS:
7884 case MSR_IA32_SYSENTER_EIP:
7885 case MSR_IA32_SYSENTER_ESP:
7886 case MSR_K8_FS_BASE:
7887 case MSR_K8_GS_BASE:
7888 {
7889 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
7890 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7891 }
7892
7893 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
7894 default:
7895 {
7896 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7897 {
7898 /* EFER MSR writes are always intercepted. */
7899 if (idMsr != MSR_K6_EFER)
7900 {
7901 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
7902 idMsr));
7903 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7904 }
7905 }
7906
7907 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7908 {
7909 Assert(pVmcsInfo->pvMsrBitmap);
7910 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7911 if (fMsrpm & VMXMSRPM_ALLOW_WR)
7912 {
7913 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
7914 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7915 }
7916 }
7917 break;
7918 }
7919 }
7920 }
7921#endif /* VBOX_STRICT */
7922 }
7923 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7924 {
7925 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7926 rcStrict = VINF_SUCCESS;
7927 }
7928 else
7929 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
7930 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7931
7932 return rcStrict;
7933}
7934
7935
7936/**
7937 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
7938 */
7939HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7940{
7941 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7942
7943 /** @todo The guest has likely hit a contended spinlock. We might want to
 7944 * poke or schedule a different guest VCPU. */
7945 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7946 if (RT_SUCCESS(rc))
7947 return VINF_EM_RAW_INTERRUPT;
7948
7949 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
7950 return rc;
7951}
7952
7953
7954/**
7955 * VM-exit handler for when the TPR value is lowered below the specified
7956 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
7957 */
7958HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7959{
7960 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7961 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
7962
7963 /*
7964 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
7965 * We'll re-evaluate pending interrupts and inject them before the next VM
7966 * entry so we can just continue execution here.
7967 */
7968 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
7969 return VINF_SUCCESS;
7970}
7971
7972
7973/**
7974 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
7975 * VM-exit.
7976 *
7977 * @retval VINF_SUCCESS when guest execution can continue.
7978 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
7979 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
7980 * incompatible guest state for VMX execution (real-on-v86 case).
7981 */
7982HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7983{
7984 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7985 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
7986
7987 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7988 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7989 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7990
7991 VBOXSTRICTRC rcStrict;
7992 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7993 uint64_t const uExitQual = pVmxTransient->uExitQual;
7994 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
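    /* The exit qualification packs the access type, the control register number and the
       general-purpose register involved; e.g. a 'mov cr0, eax' decodes to a WRITE access
       with iCrReg=0 and iGReg=0 (0 denoting RAX/EAX) in the cases below. */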
7995 switch (uAccessType)
7996 {
7997 /*
7998 * MOV to CRx.
7999 */
8000 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8001 {
8002 /*
8003 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8004 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
8005 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8006 * PAE PDPTEs as well.
8007 */
8008 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8009 AssertRCReturn(rc, rc);
8010
8011 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8012#ifndef IN_NEM_DARWIN
8013 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8014#endif
8015 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8016 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8017
8018 /*
 8019 * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8020 * - When nested paging isn't used.
8021 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8022 * - We are executing in the VM debug loop.
8023 */
8024#ifndef IN_NEM_DARWIN
8025 Assert( iCrReg != 3
8026 || !VM_IS_VMX_NESTED_PAGING(pVM)
8027 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8028 || pVCpu->hmr0.s.fUsingDebugLoop);
8029#else
8030 Assert( iCrReg != 3
8031 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8032#endif
8033
8034 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8035 Assert( iCrReg != 8
8036 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8037
8038 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8039 AssertMsg( rcStrict == VINF_SUCCESS
8040 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8041
8042#ifndef IN_NEM_DARWIN
8043 /*
8044 * This is a kludge for handling switches back to real mode when we try to use
 8045 * V86 mode to run real mode code directly. The problem is that V86 mode cannot
8046 * deal with special selector values, so we have to return to ring-3 and run
8047 * there till the selector values are V86 mode compatible.
8048 *
8049 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8050 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8051 * this function.
8052 */
8053 if ( iCrReg == 0
8054 && rcStrict == VINF_SUCCESS
8055 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8056 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8057 && (uOldCr0 & X86_CR0_PE)
8058 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8059 {
8060 /** @todo Check selectors rather than returning all the time. */
8061 Assert(!pVmxTransient->fIsNestedGuest);
8062 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8063 rcStrict = VINF_EM_RESCHEDULE_REM;
8064 }
8065#endif
8066
8067 break;
8068 }
8069
8070 /*
8071 * MOV from CRx.
8072 */
8073 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8074 {
8075 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8076 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8077
8078 /*
 8079 * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8080 * - When nested paging isn't used.
8081 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8082 * - We are executing in the VM debug loop.
8083 */
8084#ifndef IN_NEM_DARWIN
8085 Assert( iCrReg != 3
8086 || !VM_IS_VMX_NESTED_PAGING(pVM)
8087 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8088 || pVCpu->hmr0.s.fLeaveDone);
8089#else
8090 Assert( iCrReg != 3
8091 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8092#endif
8093
8094 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8095 Assert( iCrReg != 8
8096 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8097
8098 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8099 break;
8100 }
8101
8102 /*
8103 * CLTS (Clear Task-Switch Flag in CR0).
8104 */
8105 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8106 {
8107 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8108 break;
8109 }
8110
8111 /*
8112 * LMSW (Load Machine-Status Word into CR0).
8113 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8114 */
8115 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8116 {
8117 RTGCPTR GCPtrEffDst;
8118 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8119 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8120 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
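            /* LMSW can take a memory operand; in that case the exit qualification flags it
               and the guest-linear-address VMCS field supplies the effective address of the
               operand, which is passed on to vmxHCExitLmsw below. */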
8121 if (fMemOperand)
8122 {
8123 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
8124 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8125 }
8126 else
8127 GCPtrEffDst = NIL_RTGCPTR;
8128 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8129 break;
8130 }
8131
8132 default:
8133 {
8134 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8135 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8136 }
8137 }
8138
8139 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8140 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8141 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8142
8143 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8144 NOREF(pVM);
8145 return rcStrict;
8146}
8147
8148
8149/**
8150 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8151 * VM-exit.
8152 */
8153HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8154{
8155 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8156 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8157
8158 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8159 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8160 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8161 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8162 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK
8163 | CPUMCTX_EXTRN_EFER);
 8164 /* The EFER MSR is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8165 AssertRCReturn(rc, rc);
8166
 8167 /* See Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
8168 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8169 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8170 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8171 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8172 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8173 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8174 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8175
8176 /*
8177 * Update exit history to see if this exit can be optimized.
8178 */
8179 VBOXSTRICTRC rcStrict;
8180 PCEMEXITREC pExitRec = NULL;
8181 if ( !fGstStepping
8182 && !fDbgStepping)
8183 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8184 !fIOString
8185 ? !fIOWrite
8186 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8187 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8188 : !fIOWrite
8189 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8190 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8191 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8192 if (!pExitRec)
8193 {
8194 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8195 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
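        /* The exit qualification encodes the access size as 0, 1 or 3 for 1, 2 and 4 byte
           accesses respectively (2 is invalid, see the AssertReturn above); e.g. a 2-byte
           'IN AX, DX' yields cbValue=2 and uAndVal=0xffff, so only AX is updated when the
           result is merged into EAX further down. */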
8196
8197 uint32_t const cbValue = s_aIOSizes[uIOSize];
8198 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8199 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8200 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8201 if (fIOString)
8202 {
8203 /*
8204 * INS/OUTS - I/O String instruction.
8205 *
8206 * Use instruction-information if available, otherwise fall back on
8207 * interpreting the instruction.
8208 */
8209 Log4Func(("cs:rip=%#04x:%#RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8210 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8211 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8212 if (fInsOutsInfo)
8213 {
8214 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8215 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8216 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8217 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8218 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8219 if (fIOWrite)
8220 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8221 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8222 else
8223 {
8224 /*
8225 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
 8226 * Hence the "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8227 * See Intel Instruction spec. for "INS".
8228 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8229 */
8230 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8231 }
8232 }
8233 else
8234 rcStrict = IEMExecOne(pVCpu);
8235
8236 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8237 fUpdateRipAlready = true;
8238 }
8239 else
8240 {
8241 /*
8242 * IN/OUT - I/O instruction.
8243 */
8244 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8245 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8246 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8247 if (fIOWrite)
8248 {
8249 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8250 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8251#ifndef IN_NEM_DARWIN
8252 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8253 && !pCtx->eflags.Bits.u1TF)
8254 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8255#endif
8256 }
8257 else
8258 {
8259 uint32_t u32Result = 0;
8260 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8261 if (IOM_SUCCESS(rcStrict))
8262 {
8263 /* Save result of I/O IN instr. in AL/AX/EAX. */
8264 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8265 }
8266#ifndef IN_NEM_DARWIN
8267 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8268 && !pCtx->eflags.Bits.u1TF)
8269 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8270#endif
8271 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8272 }
8273 }
8274
8275 if (IOM_SUCCESS(rcStrict))
8276 {
8277 if (!fUpdateRipAlready)
8278 {
8279 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8280 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8281 }
8282
8283 /*
 8284 * INS/OUTS with a REP prefix updates RFLAGS; not marking RFLAGS as changed here was
 8285 * observed to cause a triple-fault guru meditation while booting a Fedora 17 64-bit guest.
8286 *
8287 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8288 */
8289 if (fIOString)
8290 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8291
8292 /*
8293 * If any I/O breakpoints are armed, we need to check if one triggered
8294 * and take appropriate action.
8295 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8296 */
8297 rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_DR7);
8298 AssertRCReturn(rc, rc);
8299
8300 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8301 * execution engines about whether hyper BPs and such are pending. */
8302 uint32_t const uDr7 = pCtx->dr[7];
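            /* Only take the expensive path if one of the four DR7 breakpoints is enabled and
               configured as an I/O breakpoint (which additionally requires CR4.DE to be set),
               or if DBGF has a hardware I/O breakpoint armed. */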
8303 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8304 && X86_DR7_ANY_RW_IO(uDr7)
8305 && (pCtx->cr4 & X86_CR4_DE))
8306 || DBGFBpIsHwIoArmed(pVM)))
8307 {
8308 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8309
8310#ifndef IN_NEM_DARWIN
8311 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8312 VMMRZCallRing3Disable(pVCpu);
8313 HM_DISABLE_PREEMPT(pVCpu);
8314
8315 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8316
8317 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8318 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8319 {
8320 /* Raise #DB. */
8321 if (fIsGuestDbgActive)
8322 ASMSetDR6(pCtx->dr[6]);
8323 if (pCtx->dr[7] != uDr7)
8324 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8325
8326 vmxHCSetPendingXcptDB(pVCpu);
8327 }
8328 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8329 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8330 else if ( rcStrict2 != VINF_SUCCESS
8331 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8332 rcStrict = rcStrict2;
8333 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8334
8335 HM_RESTORE_PREEMPT();
8336 VMMRZCallRing3Enable(pVCpu);
8337#else
8338 /** @todo */
8339#endif
8340 }
8341 }
8342
8343#ifdef VBOX_STRICT
8344 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8345 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8346 Assert(!fIOWrite);
8347 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8348 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8349 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8350 Assert(fIOWrite);
8351 else
8352 {
8353# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8354 * statuses, that the VMM device and some others may return. See
8355 * IOM_SUCCESS() for guidance. */
8356 AssertMsg( RT_FAILURE(rcStrict)
8357 || rcStrict == VINF_SUCCESS
8358 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8359 || rcStrict == VINF_EM_DBG_BREAKPOINT
8360 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8361 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8362# endif
8363 }
8364#endif
8365 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8366 }
8367 else
8368 {
8369 /*
8370 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8371 */
8372 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8373 AssertRCReturn(rc2, rc2);
8374 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8375 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8376 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8377 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8378 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8379 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8380
8381 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8382 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8383
8384 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8385 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8386 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8387 }
8388 return rcStrict;
8389}
8390
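/*
 * Illustrative sketch (not compiled): how the handler above folds an 'IN' result into the
 * low bits of EAX using the AND masks from s_aIOOpAnd. The function name and parameters
 * are hypothetical; the merge expression mirrors the one used in the handler.
 */
#if 0
static uint32_t vmxSketchMergeIoResult(uint32_t uOldEax, uint32_t u32Result, uint32_t uAndVal)
{
    /* E.g. a 1-byte IN with uOldEax=0x12345678, u32Result=0xab and uAndVal=0xff yields 0x123456ab. */
    return (uOldEax & ~uAndVal) | (u32Result & uAndVal);
}
#endif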
8391
8392/**
8393 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8394 * VM-exit.
8395 */
8396HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8397{
8398 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8399
 8400 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8401 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8402 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
8403 {
8404 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8405 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
8406 {
8407 uint32_t uErrCode;
8408 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
8409 {
8410 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8411 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
8412 }
8413 else
8414 uErrCode = 0;
8415
8416 RTGCUINTPTR GCPtrFaultAddress;
8417 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
8418 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
8419 else
8420 GCPtrFaultAddress = 0;
8421
8422 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8423
8424 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
8425 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
8426
8427 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
8428 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
8429 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8430 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8431 }
8432 }
8433
8434 /* Fall back to the interpreter to emulate the task-switch. */
8435 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8436 return VERR_EM_INTERPRETER;
8437}
8438
8439
8440/**
8441 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
8442 */
8443HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8444{
8445 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8446
8447 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8448 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
8449 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8450 AssertRC(rc);
8451 return VINF_EM_DBG_STEPPED;
8452}
8453
8454
8455/**
8456 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
8457 */
8458HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8459{
8460 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8461 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
8462
8463 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8464 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8465 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8466 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8467 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8468
8469 /*
8470 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8471 */
8472 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8473 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8474 {
8475 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
8476 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
8477 {
8478 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8479 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8480 }
8481 }
8482 else
8483 {
8484 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8485 return rcStrict;
8486 }
8487
8488 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
8489 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8490 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8491 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8492 AssertRCReturn(rc, rc);
8493
 8494 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses". */
8495 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
8496 switch (uAccessType)
8497 {
8498#ifndef IN_NEM_DARWIN
8499 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
8500 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
8501 {
8502 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
8503 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
8504 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
8505
8506 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
8507 GCPhys &= PAGE_BASE_GC_MASK;
8508 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
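            /* The exit qualification only reports the page offset of the access, so the full
               physical address handed to the MMIO handler below is the (page-aligned) guest
               APIC base plus that offset, as computed above. */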
8509 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
8510 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
8511
8512 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
8513 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
8514 Log4Func(("IOMMMIOPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8515 if ( rcStrict == VINF_SUCCESS
8516 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8517 || rcStrict == VERR_PAGE_NOT_PRESENT)
8518 {
8519 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8520 | HM_CHANGED_GUEST_APIC_TPR);
8521 rcStrict = VINF_SUCCESS;
8522 }
8523 break;
8524 }
8525#else
8526 /** @todo */
8527#endif
8528
8529 default:
8530 {
8531 Log4Func(("uAccessType=%#x\n", uAccessType));
8532 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
8533 break;
8534 }
8535 }
8536
8537 if (rcStrict != VINF_SUCCESS)
8538 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
8539 return rcStrict;
8540}
8541
8542
8543/**
8544 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
8545 * VM-exit.
8546 */
8547HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8548{
8549 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8550 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8551
8552 /*
8553 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
8554 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
8555 * must emulate the MOV DRx access.
8556 */
8557 if (!pVmxTransient->fIsNestedGuest)
8558 {
8559 /* We should -not- get this VM-exit if the guest's debug registers were active. */
8560 if (pVmxTransient->fWasGuestDebugStateActive)
8561 {
8562 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
8563 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8564 }
8565
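/* If neither single-stepping nor the hypervisor debug state is active, we can stop intercepting
   MOV DRx and lazily switch to the guest debug state below. */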
8566 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
8567 && !pVmxTransient->fWasHyperDebugStateActive)
8568 {
8569 Assert(!DBGFIsStepping(pVCpu));
8570 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
8571
8572 /* Don't intercept MOV DRx any more. */
8573 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
8574 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8575 AssertRC(rc);
8576
8577#ifndef IN_NEM_DARWIN
8578 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
8579 VMMRZCallRing3Disable(pVCpu);
8580 HM_DISABLE_PREEMPT(pVCpu);
8581
8582 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
8583 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
8584 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8585
8586 HM_RESTORE_PREEMPT();
8587 VMMRZCallRing3Enable(pVCpu);
8588#else
8589 /** @todo */
8590#endif
8591
8592#ifdef VBOX_WITH_STATISTICS
8593 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8594 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8595 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8596 else
8597 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8598#endif
8599 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
8600 return VINF_SUCCESS;
8601 }
8602 }
8603
8604 /*
8605 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode(), which requires the EFER MSR and CS.
8606 * The EFER MSR is always up-to-date.
8607 * Update the segment registers and DR7 from the CPU.
8608 */
8609 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8610 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8611 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
8612 AssertRCReturn(rc, rc);
8613 Log4Func(("cs:rip=%#04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
8614
8615 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8616 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8617 {
8618 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8619 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
8620 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
8621 if (RT_SUCCESS(rc))
8622 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
8623 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8624 }
8625 else
8626 {
8627 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8628 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
8629 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
8630 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8631 }
8632
8633 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8634 if (RT_SUCCESS(rc))
8635 {
8636 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8637 AssertRCReturn(rc2, rc2);
8638 return VINF_SUCCESS;
8639 }
8640 return rc;
8641}
8642
8643
8644/**
8645 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
8646 * Conditional VM-exit.
8647 */
8648HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8649{
8650 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8651
8652#ifndef IN_NEM_DARWIN
8653 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8654
8655 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8656 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8657 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8658 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8659 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8660
8661 /*
8662 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8663 */
8664 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8665 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8666 {
8667 /*
8668 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
8669 * instruction emulation to inject the original event. Otherwise, injecting the original event
8670 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
8671 */
8672 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8673 { /* likely */ }
8674 else
8675 {
8676 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8677#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8678 /** @todo NSTVMX: Think about how this should be handled. */
8679 if (pVmxTransient->fIsNestedGuest)
8680 return VERR_VMX_IPE_3;
8681#endif
8682 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8683 }
8684 }
8685 else
8686 {
8687 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8688 return rcStrict;
8689 }
8690
8691 /*
8692 * Get sufficient state and update the exit history entry.
8693 */
8694 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8695 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8696 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8697 AssertRCReturn(rc, rc);
8698
8699 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8700 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8701 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
8702 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8703 if (!pExitRec)
8704 {
8705 /*
8706 * If we succeed, resume guest execution.
8707 * If we fail to interpret the instruction because we couldn't get the guest physical address
8708 * of the page containing the instruction via the guest's page tables (we would invalidate the
8709 * guest page in the host TLB), resume execution, which would cause a guest page fault and let
8710 * the guest handle this weird case. See @bugref{6043}.
8711 */
8712 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8713 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8714/** @todo bird: We can probably just go straight to IOM here and assume that
8715 * it's MMIO, then fall back on PGM if that hunch didn't work out so
8716 * well. However, we need to address the aliasing workarounds that
8717 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
8718 *
8719 * Might also be interesting to see if we can get this done more or
8720 * less locklessly inside IOM. Need to consider the lookup-table
8721 * updating and its use a bit more carefully first (or do all updates
8722 * via rendezvous). */
8723 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
8724 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
8725 if ( rcStrict == VINF_SUCCESS
8726 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8727 || rcStrict == VERR_PAGE_NOT_PRESENT)
8728 {
8729 /* Successfully handled MMIO operation. */
8730 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8731 | HM_CHANGED_GUEST_APIC_TPR);
8732 rcStrict = VINF_SUCCESS;
8733 }
8734 }
8735 else
8736 {
8737 /*
8738 * Frequent exit or something needing probing. Call EMHistoryExec.
8739 */
8740 Log4(("EptMiscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
8741 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
8742
8743 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8744 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8745
8746 Log4(("EptMiscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8747 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8748 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8749 }
8750 return rcStrict;
8751#else
8752 AssertFailed();
8753 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
8754#endif
8755}
8756
8757
8758/**
8759 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
8760 * VM-exit.
8761 */
8762HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8763{
8764 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8765#ifndef IN_NEM_DARWIN
8766 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8767
8768 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8769 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8770 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8771 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8772 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8773 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8774
8775 /*
8776 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8777 */
8778 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8779 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8780 {
8781 /*
8782 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
8783 * we shall resolve the nested #PF and re-inject the original event.
8784 */
8785 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8786 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
8787 }
8788 else
8789 {
8790 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8791 return rcStrict;
8792 }
8793
8794 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8795 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8796 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8797 AssertRCReturn(rc, rc);
8798
8799 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8800 uint64_t const uExitQual = pVmxTransient->uExitQual;
8801 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
8802
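/* Translate the EPT-violation exit qualification into an x86 #PF error code: an instruction
   fetch maps to PF_ID, a write access to PF_RW, and PF_P is set when the faulting EPT entry had
   any read/write/execute permission (the translation existed but the access was disallowed). */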
8803 RTGCUINT uErrorCode = 0;
8804 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
8805 uErrorCode |= X86_TRAP_PF_ID;
8806 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8807 uErrorCode |= X86_TRAP_PF_RW;
8808 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
8809 uErrorCode |= X86_TRAP_PF_P;
8810
8811 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8812 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%#RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
8813
8814 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8815
8816 /*
8817 * Handle the page-fault trap for the nested shadow table.
8818 */
8819 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
8820 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
8821 TRPMResetTrap(pVCpu);
8822
8823 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
8824 if ( rcStrict == VINF_SUCCESS
8825 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8826 || rcStrict == VERR_PAGE_NOT_PRESENT)
8827 {
8828 /* Successfully synced our nested page tables. */
8829 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
8830 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
8831 return VINF_SUCCESS;
8832 }
8833#else
8834 PVM pVM = pVCpu->CTX_SUFF(pVM);
8835 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
8836 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8837 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8838 vmxHCImportGuestRip(pVCpu);
8839 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
8840
8841 /*
8842 * Ask PGM for information about the given GCPhys. We need to check if we're
8843 * out of sync first.
8844 */
8845 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE), false, false };
8846 PGMPHYSNEMPAGEINFO Info;
8847 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
8848 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
8849 if (RT_SUCCESS(rc))
8850 {
8851 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8852 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
8853 {
8854 if (State.fCanResume)
8855 {
8856 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
8857 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8858 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8859 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8860 State.fDidSomething ? "" : " no-change"));
8861 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
8862 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8863 return VINF_SUCCESS;
8864 }
8865 }
8866
8867 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
8868 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8869 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8870 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8871 State.fDidSomething ? "" : " no-change"));
8872 }
8873 else
8874 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
8875 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8876 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
8877
8878 /*
8879 * Emulate the memory access, either access handler or special memory.
8880 */
8881 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
8882 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8883 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
8884 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
8885 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8886
8887 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8888 AssertRCReturn(rc, rc);
8889
8890 VBOXSTRICTRC rcStrict;
8891 if (!pExitRec)
8892 rcStrict = IEMExecOne(pVCpu);
8893 else
8894 {
8895 /* Frequent access or probing. */
8896 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8897 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8898 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8899 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8900 }
8901
8902 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8903#endif
8904
8905 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8906 return rcStrict;
8907}
8908
8909
8910#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8911/**
8912 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
8913 */
8914HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8915{
8916 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8917
8918 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8919 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8920 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8921 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8922 | CPUMCTX_EXTRN_HWVIRT
8923 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8924 AssertRCReturn(rc, rc);
8925
8926 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8927
8928 VMXVEXITINFO ExitInfo;
8929 RT_ZERO(ExitInfo);
8930 ExitInfo.uReason = pVmxTransient->uExitReason;
8931 ExitInfo.u64Qual = pVmxTransient->uExitQual;
8932 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
8933 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
8934 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
8935
8936 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
8937 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8938 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
8939 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8940 {
8941 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8942 rcStrict = VINF_SUCCESS;
8943 }
8944 return rcStrict;
8945}
8946
8947
8948/**
8949 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
8950 */
8951HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8952{
8953 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8954
8955 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH;
8956 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
8957 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8958 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8959 AssertRCReturn(rc, rc);
8960
8961 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8962
8963 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
8964 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
8965 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
8966 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8967 {
8968 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8969 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
8970 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
8971 }
8972 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8973 return rcStrict;
8974}
8975
8976
8977/**
8978 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
8979 */
8980HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8981{
8982 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8983
8984 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8985 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8986 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8987 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8988 | CPUMCTX_EXTRN_HWVIRT
8989 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8990 AssertRCReturn(rc, rc);
8991
8992 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8993
8994 VMXVEXITINFO ExitInfo;
8995 RT_ZERO(ExitInfo);
8996 ExitInfo.uReason = pVmxTransient->uExitReason;
8997 ExitInfo.u64Qual = pVmxTransient->uExitQual;
8998 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
8999 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9000 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9001
9002 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9003 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9004 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9005 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9006 {
9007 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9008 rcStrict = VINF_SUCCESS;
9009 }
9010 return rcStrict;
9011}
9012
9013
9014/**
9015 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9016 */
9017HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9018{
9019 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9020
9021 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9022 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9023 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9024 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9025 | CPUMCTX_EXTRN_HWVIRT
9026 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9027 AssertRCReturn(rc, rc);
9028
9029 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9030
9031 VMXVEXITINFO ExitInfo;
9032 RT_ZERO(ExitInfo);
9033 ExitInfo.uReason = pVmxTransient->uExitReason;
9034 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9035 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9036 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9037 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9038
9039 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9040 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9041 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9042 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9043 {
9044 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9045 rcStrict = VINF_SUCCESS;
9046 }
9047 return rcStrict;
9048}
9049
9050
9051/**
9052 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9053 */
9054HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9055{
9056 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9057
9058 /*
9059 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and thus
9060 * might not need to import the shadow VMCS state, but it's safer to do so in case code
9061 * elsewhere dares to look at unsynced VMCS fields.
9062 */
9063 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9064 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9065 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9066 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9067 | CPUMCTX_EXTRN_HWVIRT
9068 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9069 AssertRCReturn(rc, rc);
9070
9071 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9072
9073 VMXVEXITINFO ExitInfo;
9074 RT_ZERO(ExitInfo);
9075 ExitInfo.uReason = pVmxTransient->uExitReason;
9076 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9077 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9078 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9079 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9080 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9081
9082 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9083 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9084 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9085 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9086 {
9087 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9088 rcStrict = VINF_SUCCESS;
9089 }
9090 return rcStrict;
9091}
9092
9093
9094/**
9095 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9096 */
9097HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9098{
9099 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9100
9101 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME;
9102 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9103 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9104 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9105 AssertRCReturn(rc, rc);
9106
9107 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9108
9109 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9110 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9111 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9112 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9113 {
9114 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9115 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9116 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9117 }
9118 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9119 return rcStrict;
9120}
9121
9122
9123/**
9124 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9125 */
9126HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9127{
9128 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9129
9130 /*
9131 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, our HM hook gets
9132 * invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and flags
9133 * re-loading of the entire shadow VMCS, so we should save the entire shadow VMCS here.
9134 */
9135 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9136 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9137 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9138 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9139 | CPUMCTX_EXTRN_HWVIRT
9140 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9141 AssertRCReturn(rc, rc);
9142
9143 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9144
9145 VMXVEXITINFO ExitInfo;
9146 RT_ZERO(ExitInfo);
9147 ExitInfo.uReason = pVmxTransient->uExitReason;
9148 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9149 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9150 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9151 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9152 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9153
9154 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9155 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9156 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9157 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9158 {
9159 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9160 rcStrict = VINF_SUCCESS;
9161 }
9162 return rcStrict;
9163}
9164
9165
9166/**
9167 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9168 */
9169HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9170{
9171 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9172
9173 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9174 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR4
9175 | CPUMCTX_EXTRN_HWVIRT
9176 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
9177 AssertRCReturn(rc, rc);
9178
9179 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9180
9181 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9182 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9183 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9184 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9185 {
9186 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9187 rcStrict = VINF_SUCCESS;
9188 }
9189 return rcStrict;
9190}
9191
9192
9193/**
9194 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9195 */
9196HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9197{
9198 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9199
9200 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9201 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9202 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9203 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9204 | CPUMCTX_EXTRN_HWVIRT
9205 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9206 AssertRCReturn(rc, rc);
9207
9208 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9209
9210 VMXVEXITINFO ExitInfo;
9211 RT_ZERO(ExitInfo);
9212 ExitInfo.uReason = pVmxTransient->uExitReason;
9213 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9214 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9215 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9216 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9217
9218 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9219 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9220 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9221 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9222 {
9223 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9224 rcStrict = VINF_SUCCESS;
9225 }
9226 return rcStrict;
9227}
9228
9229
9230/**
9231 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9232 */
9233HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9234{
9235 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9236
9237 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9238 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9239 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9240 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9241 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9242 AssertRCReturn(rc, rc);
9243
9244 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9245
9246 VMXVEXITINFO ExitInfo;
9247 RT_ZERO(ExitInfo);
9248 ExitInfo.uReason = pVmxTransient->uExitReason;
9249 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9250 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9251 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9252 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9253
9254 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9255 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9256 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9257 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9258 {
9259 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9260 rcStrict = VINF_SUCCESS;
9261 }
9262 return rcStrict;
9263}
9264
9265
9266# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9267/**
9268 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9269 */
9270HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9271{
9272 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9273
9274 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9275 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9276 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9277 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9278 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9279 AssertRCReturn(rc, rc);
9280
9281 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9282
9283 VMXVEXITINFO ExitInfo;
9284 RT_ZERO(ExitInfo);
9285 ExitInfo.uReason = pVmxTransient->uExitReason;
9286 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9287 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9288 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9289 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9290
9291 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9292 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9293 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9294 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9295 {
9296 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9297 rcStrict = VINF_SUCCESS;
9298 }
9299 return rcStrict;
9300}
9301# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9302#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9303/** @} */
9304
9305
9306#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9307/** @name Nested-guest VM-exit handlers.
9308 * @{
9309 */
9310/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9311/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9312/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9313
9314/**
9315 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9316 * Conditional VM-exit.
9317 */
9318HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9319{
9320 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9321
9322 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
9323
9324 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9325 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9326 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9327
9328 switch (uExitIntType)
9329 {
9330#ifndef IN_NEM_DARWIN
9331 /*
9332 * Physical NMIs:
9333 * We shouldn't direct host physical NMIs to the nested-guest. Dispatch them to the host.
9334 */
9335 case VMX_EXIT_INT_INFO_TYPE_NMI:
9336 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9337#endif
9338
9339 /*
9340 * Hardware exceptions,
9341 * Software exceptions,
9342 * Privileged software exceptions:
9343 * Figure out if the exception must be delivered to the guest or the nested-guest.
9344 */
9345 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9346 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9347 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9348 {
9349 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
9350 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9351 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9352 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9353
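/* Check whether the nested hypervisor intercepts this exception (its exception bitmap and, for
   #PF, its error-code match/mask controls); if so, reflect the VM-exit to it, otherwise handle
   the exception as for a regular guest. */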
9354 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9355 bool const fIntercept = CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo),
9356 pVmxTransient->uExitIntErrorCode);
9357 if (fIntercept)
9358 {
9359 /* Exit qualification is required for debug and page-fault exceptions. */
9360 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9361
9362 /*
9363 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9364 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9365 * length. However, if delivery of a software interrupt, software exception or privileged
9366 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
9367 */
9368 VMXVEXITINFO ExitInfo;
9369 RT_ZERO(ExitInfo);
9370 ExitInfo.uReason = pVmxTransient->uExitReason;
9371 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9372 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9373
9374 VMXVEXITEVENTINFO ExitEventInfo;
9375 RT_ZERO(ExitEventInfo);
9376 ExitEventInfo.uExitIntInfo = pVmxTransient->uExitIntInfo;
9377 ExitEventInfo.uExitIntErrCode = pVmxTransient->uExitIntErrorCode;
9378 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9379 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9380
9381#ifdef DEBUG_ramshankar
9382 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9383 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n", pVmxTransient->uExitIntInfo,
9384 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
9385 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9386 {
9387 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n", pVmxTransient->uIdtVectoringInfo,
9388 pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
9389 }
9390#endif
9391 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
9392 }
9393
9394 /* Nested paging is currently a requirement; otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
9395 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9396 return vmxHCExitXcpt(pVCpu, pVmxTransient);
9397 }
9398
9399 /*
9400 * Software interrupts:
9401 * VM-exits cannot be caused by software interrupts.
9402 *
9403 * External interrupts:
9404 * This should only happen when "acknowledge external interrupts on VM-exit"
9405 * control is set. However, we never set this when executing a guest or
9406 * nested-guest. For nested-guests it is emulated while injecting interrupts into
9407 * the guest.
9408 */
9409 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
9410 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
9411 default:
9412 {
9413 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
9414 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
9415 }
9416 }
9417}
9418
9419
9420/**
9421 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
9422 * Unconditional VM-exit.
9423 */
9424HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9425{
9426 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9427 return IEMExecVmxVmexitTripleFault(pVCpu);
9428}
9429
9430
9431/**
9432 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
9433 */
9434HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9435{
9436 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9437
9438 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
9439 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9440 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9441}
9442
9443
9444/**
9445 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
9446 */
9447HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9448{
9449 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9450
9451 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
9452 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9453 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9454}
9455
9456
9457/**
9458 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
9459 * Unconditional VM-exit.
9460 */
9461HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9462{
9463 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9464
9465 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9466 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9467 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9468 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9469
9470 VMXVEXITINFO ExitInfo;
9471 RT_ZERO(ExitInfo);
9472 ExitInfo.uReason = pVmxTransient->uExitReason;
9473 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9474 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9475
9476 VMXVEXITEVENTINFO ExitEventInfo;
9477 RT_ZERO(ExitEventInfo);
9478 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9479 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9480 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
9481}
9482
9483
9484/**
9485 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
9486 */
9487HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9488{
9489 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9490
9491 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
9492 {
9493 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9494 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9495 }
9496 return vmxHCExitHlt(pVCpu, pVmxTransient);
9497}
9498
9499
9500/**
9501 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
9502 */
9503HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9504{
9505 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9506
9507 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
9508 {
9509 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9510 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9511
9512 VMXVEXITINFO ExitInfo;
9513 RT_ZERO(ExitInfo);
9514 ExitInfo.uReason = pVmxTransient->uExitReason;
9515 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9516 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9517 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9518 }
9519 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
9520}
9521
9522
9523/**
9524 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
9525 */
9526HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9527{
9528 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9529
9530 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
9531 {
9532 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9533 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9534 }
9535 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
9536}
9537
9538
9539/**
9540 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
9541 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
9542 */
9543HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9544{
9545 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9546
9547 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
9548 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
9549
9550 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9551
9552 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
9553 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9554 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9555
9556 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
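/* Outside long mode the register is only 32 bits wide, so mask off any stale upper bits before
   using it as the VMCS field encoding. */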
9557 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
9558 u64VmcsField &= UINT64_C(0xffffffff);
9559
9560 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
9561 {
9562 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9563 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9564
9565 VMXVEXITINFO ExitInfo;
9566 RT_ZERO(ExitInfo);
9567 ExitInfo.uReason = pVmxTransient->uExitReason;
9568 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9569 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9570 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
9571 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9572 }
9573
9574 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
9575 return vmxHCExitVmread(pVCpu, pVmxTransient);
9576 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
9577}
9578
9579
9580/**
9581 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
9582 */
9583HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9584{
9585 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9586
9587 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
9588 {
9589 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9590 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9591 }
9592
9593 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
9594}
9595
9596
9597/**
9598 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
9599 * Conditional VM-exit.
9600 */
9601HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9602{
9603 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9604
9605 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9606 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9607
9608 VBOXSTRICTRC rcStrict;
9609 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
9610 switch (uAccessType)
9611 {
9612 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
9613 {
9614 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9615 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9616 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9617 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9618
9619 bool fIntercept;
9620 switch (iCrReg)
9621 {
9622 case 0:
9623 case 4:
9624 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
9625 break;
9626
9627 case 3:
9628 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
9629 break;
9630
9631 case 8:
9632 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
9633 break;
9634
9635 default:
9636 fIntercept = false;
9637 break;
9638 }
9639 if (fIntercept)
9640 {
9641 VMXVEXITINFO ExitInfo;
9642 RT_ZERO(ExitInfo);
9643 ExitInfo.uReason = pVmxTransient->uExitReason;
9644 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9645 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9646 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9647 }
9648 else
9649 {
9650 int const rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
9651 AssertRCReturn(rc, rc);
9652 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9653 }
9654 break;
9655 }
9656
9657 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
9658 {
9659 /*
9660 * CR0/CR4 reads do not cause VM-exits; the read shadow is used (subject to masking).
9661 * CR2 reads do not cause a VM-exit.
9662 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
9663 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
9664 */
9665 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9666 if ( iCrReg == 3
9667 || iCrReg == 8)
9668 {
9669 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
9670 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
9671 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
9672 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
9673 {
9674 VMXVEXITINFO ExitInfo;
9675 RT_ZERO(ExitInfo);
9676 ExitInfo.uReason = pVmxTransient->uExitReason;
9677 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9678 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9679 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9680 }
9681 else
9682 {
9683 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9684 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9685 }
9686 }
9687 else
9688 {
9689 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
9690 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
9691 }
9692 break;
9693 }
9694
9695 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
9696 {
9697 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
9698 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
9699 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
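/* CLTS is intercepted only when the nested hypervisor owns CR0.TS (guest/host mask) and its
   CR0 read shadow has TS set. */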
9700 if ( (uGstHostMask & X86_CR0_TS)
9701 && (uReadShadow & X86_CR0_TS))
9702 {
9703 VMXVEXITINFO ExitInfo;
9704 RT_ZERO(ExitInfo);
9705 ExitInfo.uReason = pVmxTransient->uExitReason;
9706 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9707 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9708 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9709 }
9710 else
9711 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
9712 break;
9713 }
9714
9715 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
9716 {
9717 RTGCPTR GCPtrEffDst;
9718 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
9719 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
9720 if (fMemOperand)
9721 {
9722 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9723 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
9724 }
9725 else
9726 GCPtrEffDst = NIL_RTGCPTR;
9727
9728 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
9729 {
9730 VMXVEXITINFO ExitInfo;
9731 RT_ZERO(ExitInfo);
9732 ExitInfo.uReason = pVmxTransient->uExitReason;
9733 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9734 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
9735 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9736 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9737 }
9738 else
9739 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
9740 break;
9741 }
9742
9743 default:
9744 {
9745 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
9746 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
9747 }
9748 }
9749
9750 if (rcStrict == VINF_IEM_RAISED_XCPT)
9751 {
9752 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9753 rcStrict = VINF_SUCCESS;
9754 }
9755 return rcStrict;
9756}
9757
9758
9759/**
9760 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
9761 * Conditional VM-exit.
9762 */
9763HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9764{
9765 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9766
9767 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
9768 {
9769 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9770 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9771
9772 VMXVEXITINFO ExitInfo;
9773 RT_ZERO(ExitInfo);
9774 ExitInfo.uReason = pVmxTransient->uExitReason;
9775 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9776 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9777 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9778 }
9779 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
9780}
9781
9782
9783/**
9784 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
9785 * Conditional VM-exit.
9786 */
9787HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9788{
9789 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9790
9791 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9792
9793 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
9794 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
9795 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
9796
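/* The 2-bit size field encodes the access width: 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes (2 is not
   a valid encoding, see the assertion above). */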
9797 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
9798 uint8_t const cbAccess = s_aIOSizes[uIOSize];
9799 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
9800 {
9801 /*
9802 * IN/OUT instruction:
9803 * - Provides VM-exit instruction length.
9804 *
9805 * INS/OUTS instruction:
9806 * - Provides VM-exit instruction length.
9807 * - Provides Guest-linear address.
9808 * - Optionally provides VM-exit instruction info (depends on CPU feature).
9809 */
9810 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9811 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9812
9813 /* Make sure we don't use stale/uninitialized VMX-transient info below. */
9814 pVmxTransient->ExitInstrInfo.u = 0;
9815 pVmxTransient->uGuestLinearAddr = 0;
9816
9817 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
9818 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
9819 if (fIOString)
9820 {
9821 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9822 if (fVmxInsOutsInfo)
9823 {
9824 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
9825 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9826 }
9827 }
9828
9829 VMXVEXITINFO ExitInfo;
9830 RT_ZERO(ExitInfo);
9831 ExitInfo.uReason = pVmxTransient->uExitReason;
9832 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9833 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9834 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
9835 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
9836 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9837 }
9838 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
9839}
9840
9841
9842/**
9843 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
9844 */
9845HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9846{
9847 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9848
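/* Without MSR bitmaps the nested hypervisor intercepts every RDMSR; otherwise consult its MSR
   bitmap for the MSR in ECX. */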
9849 uint32_t fMsrpm;
9850 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9851 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9852 else
9853 fMsrpm = VMXMSRPM_EXIT_RD;
9854
9855 if (fMsrpm & VMXMSRPM_EXIT_RD)
9856 {
9857 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9858 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9859 }
9860 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
9861}
9862
9863
9864/**
9865 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
9866 */
9867HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9868{
9869 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9870
9871 uint32_t fMsrpm;
9872 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9873 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9874 else
9875 fMsrpm = VMXMSRPM_EXIT_WR;
9876
9877 if (fMsrpm & VMXMSRPM_EXIT_WR)
9878 {
9879 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9880 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9881 }
9882 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
9883}
9884
9885
9886/**
9887 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
9888 */
9889HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9890{
9891 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9892
9893 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
9894 {
9895 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9896 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9897 }
9898 return vmxHCExitMwait(pVCpu, pVmxTransient);
9899}
9900
9901
9902/**
9903 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
9904 * VM-exit.
9905 */
9906HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9907{
9908 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9909
9910 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
9911 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
9912 VMXVEXITINFO ExitInfo;
9913 RT_ZERO(ExitInfo);
9914 ExitInfo.uReason = pVmxTransient->uExitReason;
9915 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
9916 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
9917}
9918
9919
9920/**
9921 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
9922 */
9923HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9924{
9925 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9926
9927 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
9928 {
9929 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9930 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9931 }
9932 return vmxHCExitMonitor(pVCpu, pVmxTransient);
9933}
9934
9935
9936/**
9937 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
9938 */
9939HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9940{
9941 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9942
9943 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
9944 * PAUSE when executing a nested-guest? If it does not, we would not need
9945 * to check for the intercepts here. Just call VM-exit... */
9946
9947 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
9948 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
9949 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
9950 {
9951 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9952 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9953 }
9954 return vmxHCExitPause(pVCpu, pVmxTransient);
9955}
9956
9957
9958/**
9959 * Nested-guest VM-exit handler for when the TPR value is lowered below the
9960 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
9961 */
9962HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9963{
9964 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9965
9966 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
9967 {
9968 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
9969 VMXVEXITINFO ExitInfo;
9970 RT_ZERO(ExitInfo);
9971 ExitInfo.uReason = pVmxTransient->uExitReason;
9972 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
9973 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
9974 }
9975 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
9976}
9977
9978
9979/**
9980 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
9981 * VM-exit.
9982 */
9983HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9984{
9985 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9986
9987 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9988 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9989 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9990 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9991
9992 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
9993
9994 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
9995 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
9996
9997 VMXVEXITINFO ExitInfo;
9998 RT_ZERO(ExitInfo);
9999 ExitInfo.uReason = pVmxTransient->uExitReason;
10000 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10001 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10002
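    /* The APIC access may have occurred while delivering an event through the nested-guest IDT,
       so hand the IDT-vectoring info to IEM as well when reflecting this VM-exit. */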
10003 VMXVEXITEVENTINFO ExitEventInfo;
10004 RT_ZERO(ExitEventInfo);
10005 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10006 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10007 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10008}
10009
10010
10011/**
10012 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10013 * Conditional VM-exit.
10014 */
10015HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10016{
10017 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10018
10019 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
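    /* For APIC-write VM-exits the exit qualification holds the offset of the write access
       on the virtual-APIC page. */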
10020 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10021 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10022}
10023
10024
10025/**
10026 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10027 * Conditional VM-exit.
10028 */
10029HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10030{
10031 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10032
10033 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
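    /* For virtualized-EOI VM-exits the exit qualification holds the vector of the EOI'ed interrupt. */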
10034 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10035 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10036}
10037
10038
10039/**
10040 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10041 */
10042HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10043{
10044 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10045
10046 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10047 {
10048 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10049 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10050 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10051 }
10052 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10053}
10054
10055
10056/**
10057 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10058 */
10059HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10060{
10061 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10062
10063 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10064 {
10065 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10066 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10067 }
10068 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10069}
10070
10071
10072/**
10073 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10074 */
10075HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10076{
10077 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10078
10079 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10080 {
10081 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10082 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10083 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10084 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10085
10086 VMXVEXITINFO ExitInfo;
10087 RT_ZERO(ExitInfo);
10088 ExitInfo.uReason = pVmxTransient->uExitReason;
10089 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10090 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10091 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
10092 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10093 }
10094 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10095}
10096
10097
10098/**
10099 * Nested-guest VM-exit handler for invalid guest state
10100 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10101 */
10102HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10103{
10104 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10105
10106 /*
10107 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10108 * So if it does happen, it most likely indicates a bug in the hardware-assisted VMX code.
10109 * Handle it as if the outer guest were in an invalid guest state.
10110 *
10111 * When the fast path is implemented, this should be changed to cause the corresponding
10112 * nested-guest VM-exit.
10113 */
10114 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10115}
10116
10117
10118/**
10119 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10120 * and only provide the instruction length.
10121 *
10122 * Unconditional VM-exit.
10123 */
10124HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10125{
10126 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10127
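    /* Strict builds: assert that the nested hypervisor actually enabled the execution control
       that makes this VM-exit possible (for the exit reasons checked below). */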
10128#ifdef VBOX_STRICT
10129 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10130 switch (pVmxTransient->uExitReason)
10131 {
10132 case VMX_EXIT_ENCLS:
10133 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10134 break;
10135
10136 case VMX_EXIT_VMFUNC:
10137 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10138 break;
10139 }
10140#endif
10141
10142 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10143 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10144}
10145
10146
10147/**
10148 * Nested-guest VM-exit handler for instructions that provide the instruction length as
10149 * well as additional exit information (exit qualification and VM-exit instruction info).
10150 *
10151 * Unconditional VM-exit.
10152 */
10153HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10154{
10155 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10156
10157#ifdef VBOX_STRICT
10158 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10159 switch (pVmxTransient->uExitReason)
10160 {
10161 case VMX_EXIT_GDTR_IDTR_ACCESS:
10162 case VMX_EXIT_LDTR_TR_ACCESS:
10163 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10164 break;
10165
10166 case VMX_EXIT_RDRAND:
10167 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10168 break;
10169
10170 case VMX_EXIT_RDSEED:
10171 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10172 break;
10173
10174 case VMX_EXIT_XSAVES:
10175 case VMX_EXIT_XRSTORS:
10176 /** @todo NSTVMX: Verify XSS-bitmap. */
10177 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10178 break;
10179
10180 case VMX_EXIT_UMWAIT:
10181 case VMX_EXIT_TPAUSE:
10182 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10183 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10184 break;
10185
10186 case VMX_EXIT_LOADIWKEY:
10187 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10188 break;
10189 }
10190#endif
10191
10192 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10193 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10194 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10195
10196 VMXVEXITINFO ExitInfo;
10197 RT_ZERO(ExitInfo);
10198 ExitInfo.uReason = pVmxTransient->uExitReason;
10199 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10200 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10201 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
10202 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10203}
10204
10205/** @} */
10206#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10207