VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp@77807

Last change on this file since 77807 was 77590, checked in by vboxsync, 6 years ago

Adjusted for clarity. Not a bug fix.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 582.7 KB
1/* $Id: HMVMXR0.cpp 77590 2019-03-06 20:52:35Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2012-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_HM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include <iprt/x86.h>
25#include <iprt/asm-amd64-x86.h>
26#include <iprt/thread.h>
27
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/dbgf.h>
30#include <VBox/vmm/iem.h>
31#include <VBox/vmm/iom.h>
32#include <VBox/vmm/selm.h>
33#include <VBox/vmm/tm.h>
34#include <VBox/vmm/em.h>
35#include <VBox/vmm/gim.h>
36#include <VBox/vmm/apic.h>
37#ifdef VBOX_WITH_REM
38# include <VBox/vmm/rem.h>
39#endif
40#include "HMInternal.h"
41#include <VBox/vmm/vm.h>
42#include <VBox/vmm/hmvmxinline.h>
43#include "HMVMXR0.h"
44#include "dtrace/VBoxVMM.h"
45
46#ifdef DEBUG_ramshankar
47# define HMVMX_ALWAYS_SAVE_GUEST_RFLAGS
48# define HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE
49# define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
50# define HMVMX_ALWAYS_CHECK_GUEST_STATE
51# define HMVMX_ALWAYS_TRAP_ALL_XCPTS
52# define HMVMX_ALWAYS_TRAP_PF
53# define HMVMX_ALWAYS_FLUSH_TLB
54# define HMVMX_ALWAYS_SWAP_EFER
55#endif
56
57
58/*********************************************************************************************************************************
59* Defined Constants And Macros *
60*********************************************************************************************************************************/
61/** Use the function table. */
62#define HMVMX_USE_FUNCTION_TABLE
63
64/** Determine which tagged-TLB flush handler to use. */
65#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
66#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
67#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
68#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
69
70/** @name HMVMX_READ_XXX
71 * Flags to skip redundant reads of some common VMCS fields that are not part of
72 * the guest-CPU or VCPU state but are needed while handling VM-exits.
73 */
74#define HMVMX_READ_IDT_VECTORING_INFO RT_BIT_32(0)
75#define HMVMX_READ_IDT_VECTORING_ERROR_CODE RT_BIT_32(1)
76#define HMVMX_READ_EXIT_QUALIFICATION RT_BIT_32(2)
77#define HMVMX_READ_EXIT_INSTR_LEN RT_BIT_32(3)
78#define HMVMX_READ_EXIT_INTERRUPTION_INFO RT_BIT_32(4)
79#define HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE RT_BIT_32(5)
80#define HMVMX_READ_EXIT_INSTR_INFO RT_BIT_32(6)
81#define HMVMX_READ_GUEST_LINEAR_ADDR RT_BIT_32(7)
82/** @} */
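/*
 * These flags are OR'ed into VMXTRANSIENT::fVmcsFieldsRead so that each field is read from
 * the VMCS at most once per VM-exit. Illustrative sketch of the caching pattern (it mirrors
 * the hmR0VmxReadXxxVmcs helpers further down; not additional code in this file):
 *
 *     if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
 *     {
 *         int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
 *         AssertRCReturn(rc, rc);
 *         pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
 *     }
 */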
83
84/**
85 * States of the VMCS.
86 *
87 * This does not reflect all possible VMCS states but currently only those
88 * needed for maintaining the VMCS consistently even when thread-context hooks
89 * are used. Maybe later this can be extended (e.g. for nested virtualization).
90 */
91#define HMVMX_VMCS_STATE_CLEAR RT_BIT(0)
92#define HMVMX_VMCS_STATE_ACTIVE RT_BIT(1)
93#define HMVMX_VMCS_STATE_LAUNCHED RT_BIT(2)
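/*
 * Illustrative mapping of these state bits to the VMX instructions that cause the
 * transitions (a summary, not a new interface):
 *     VMCLEAR  -> HMVMX_VMCS_STATE_CLEAR     (VMCS inactive; safe to migrate or launch)
 *     VMPTRLD  -> HMVMX_VMCS_STATE_ACTIVE    (VMCS is current on this host CPU)
 *     VMLAUNCH -> HMVMX_VMCS_STATE_LAUNCHED  (subsequent VM-entries must use VMRESUME)
 */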
94
95/**
96 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
97 * guest using hardware-assisted VMX.
98 *
99 * This excludes state like GPRs (other than RSP) which are always swapped
100 * and restored across the world-switch, and also MSRs like EFER which cannot
101 * be modified by the guest without causing a VM-exit.
102 */
103#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
104 | CPUMCTX_EXTRN_RFLAGS \
105 | CPUMCTX_EXTRN_RSP \
106 | CPUMCTX_EXTRN_SREG_MASK \
107 | CPUMCTX_EXTRN_TABLE_MASK \
108 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
109 | CPUMCTX_EXTRN_SYSCALL_MSRS \
110 | CPUMCTX_EXTRN_SYSENTER_MSRS \
111 | CPUMCTX_EXTRN_TSC_AUX \
112 | CPUMCTX_EXTRN_OTHER_MSRS \
113 | CPUMCTX_EXTRN_CR0 \
114 | CPUMCTX_EXTRN_CR3 \
115 | CPUMCTX_EXTRN_CR4 \
116 | CPUMCTX_EXTRN_DR7 \
117 | CPUMCTX_EXTRN_HM_VMX_MASK)
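/*
 * Illustrative usage sketch (assumed handler shape; see the HMVMX_CPUMCTX_IMPORT_STATE
 * macro below): a VM-exit handler that needs the complete guest state imports it with
 *
 *     int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
 *     AssertRCReturn(rc, rc);
 */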
118
119/**
120 * Exception bitmap mask for real-mode guests (real-on-v86).
121 *
122 * We need to intercept all exceptions manually except:
123 * - \#AC and \#DB, which are always intercepted anyway (to prevent the CPU
124 * from deadlocking due to bugs in Intel CPUs) and thus aren't part of this mask.
125 * - \#PF, which need not be intercepted even in real-mode if we have Nested
126 * Paging support.
127 */
128#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
129 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
130 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
131 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
132 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
133 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
134 | RT_BIT(X86_XCPT_XF))
135
136/** Maximum VM-instruction error number. */
137#define HMVMX_INSTR_ERROR_MAX 28
138
139/** Profiling macro. */
140#ifdef HM_PROFILE_EXIT_DISPATCH
141# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
142# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
143#else
144# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
145# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
146#endif
147
148/** Assert that preemption is disabled or covered by thread-context hooks. */
149#define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
150 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
151
152/** Assert that we haven't migrated CPUs when thread-context hooks are not
153 * used. */
154#define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
155 || (a_pVCpu)->hm.s.idEnteredCpu == RTMpCpuId(), \
156 ("Illegal migration! Entered on CPU %u Current %u\n", \
157 (a_pVCpu)->hm.s.idEnteredCpu, RTMpCpuId()))
158
159/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
160 * context. */
161#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
162 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
163 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
164
165/** Macro for importing guest state from the VMCS back into CPUMCTX (intended to be
166 * used only from VM-exit handlers). */
167#define HMVMX_CPUMCTX_IMPORT_STATE(a_pVCpu, a_fWhat) (hmR0VmxImportGuestState((a_pVCpu), (a_fWhat)))
168
169/** Helper macro for VM-exit handlers called unexpectedly. */
170#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_pVmxTransient) \
171 do { \
172 (a_pVCpu)->hm.s.u32HMError = (a_pVmxTransient)->uExitReason; \
173 return VERR_VMX_UNEXPECTED_EXIT; \
174 } while (0)
175
176/** Macro for importing segment registers to the VMCS from the guest-CPU context. */
177#ifdef VMX_USE_CACHED_VMCS_ACCESSES
178# define HMVMX_IMPORT_SREG(Sel, a_pCtxSelReg) \
179 hmR0VmxImportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
180 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))
181#else
182# define HMVMX_IMPORT_SREG(Sel, a_pCtxSelReg) \
183 hmR0VmxImportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
184 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))
185#endif
186
187/** Macro for exporting segment registers to the VMCS from the guest-CPU context. */
188#define HMVMX_EXPORT_SREG(Sel, a_pCtxSelReg) \
189 hmR0VmxExportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
190 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))
191
192#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
193/** Macro that does the necessary privilege checks and intercepted VM-exits for
194 * guests that attempted to execute a VMX instruction. */
195# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
196 do \
197 { \
198 VBOXSTRICTRC rcStrictTmp = hmR0VmxCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
199 if (rcStrictTmp == VINF_SUCCESS) \
200 { /* likely */ } \
201 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
202 { \
203 Assert((a_pVCpu)->hm.s.Event.fPending); \
204 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
205 return VINF_SUCCESS; \
206 } \
207 else \
208 { \
209 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
210 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
211 } \
212 } while (0)
213
214/** Macro that decodes a memory operand for an instruction VM-exit. */
215# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
216 do \
217 { \
218 VBOXSTRICTRC rcStrictTmp = hmR0VmxDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
219 (a_pGCPtrEffAddr)); \
220 if (rcStrictTmp == VINF_SUCCESS) \
221 { /* likely */ } \
222 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
223 { \
224 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
225 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
226 NOREF(uXcptTmp); \
227 return VINF_SUCCESS; \
228 } \
229 else \
230 { \
231 Log4Func(("hmR0VmxDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
232 return rcStrictTmp; \
233 } \
234 } while (0)
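/*
 * Illustrative sketch (assumed handler shape, not lifted verbatim from a specific handler)
 * of how the two macros above cooperate in a VMX-instruction VM-exit handler:
 *
 *     HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
 *     RTGCPTR GCPtrVmcs;
 *     HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual,
 *                              VMXMEMACCESS_READ, &GCPtrVmcs);
 *     ...then hand the decoded effective address to IEM to emulate the instruction...
 */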
235
236#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
237
238
239/*********************************************************************************************************************************
240* Structures and Typedefs *
241*********************************************************************************************************************************/
242/**
243 * VMX transient state.
244 *
245 * A state structure for holding miscellaneous information across
246 * VMX non-root operation and restored after the transition.
247 */
248typedef struct VMXTRANSIENT
249{
250 /** The host's rflags/eflags. */
251 RTCCUINTREG fEFlags;
252#if HC_ARCH_BITS == 32
253 uint32_t u32Alignment0;
254#endif
255 /** The guest's TPR value used for TPR shadowing. */
256 uint8_t u8GuestTpr;
257 /** Alignment. */
258 uint8_t abAlignment0[7];
259
260 /** The basic VM-exit reason. */
261 uint16_t uExitReason;
262 /** Alignment. */
263 uint16_t u16Alignment0;
264 /** The VM-exit interruption error code. */
265 uint32_t uExitIntErrorCode;
266 /** The VM-exit qualification. */
267 uint64_t uExitQual;
268 /** The Guest-linear address. */
269 uint64_t uGuestLinearAddr;
270
271 /** The VM-exit interruption-information field. */
272 uint32_t uExitIntInfo;
273 /** The VM-exit instruction-length field. */
274 uint32_t cbInstr;
275 /** The VM-exit instruction-information field. */
276 VMXEXITINSTRINFO ExitInstrInfo;
277 /** Whether the VM-entry failed or not. */
278 bool fVMEntryFailed;
279 /** Alignment. */
280 uint8_t abAlignment1[3];
281
282 /** The VM-entry interruption-information field. */
283 uint32_t uEntryIntInfo;
284 /** The VM-entry exception error code field. */
285 uint32_t uEntryXcptErrorCode;
286 /** The VM-entry instruction length field. */
287 uint32_t cbEntryInstr;
288
289 /** IDT-vectoring information field. */
290 uint32_t uIdtVectoringInfo;
291 /** IDT-vectoring error code. */
292 uint32_t uIdtVectoringErrorCode;
293
294 /** Mask of currently read VMCS fields; HMVMX_READ_XXX. */
295 uint32_t fVmcsFieldsRead;
296
297 /** Whether the guest debug state was active at the time of VM-exit. */
298 bool fWasGuestDebugStateActive;
299 /** Whether the hyper debug state was active at the time of VM-exit. */
300 bool fWasHyperDebugStateActive;
301 /** Whether TSC-offsetting should be set up before VM-entry. */
302 bool fUpdateTscOffsettingAndPreemptTimer;
303 /** Whether the VM-exit was caused by a page-fault during delivery of a
304 * contributory exception or a page-fault. */
305 bool fVectoringDoublePF;
306 /** Whether the VM-exit was caused by a page-fault during delivery of an
307 * external interrupt or NMI. */
308 bool fVectoringPF;
309} VMXTRANSIENT;
310AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, sizeof(uint64_t));
311AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntInfo, sizeof(uint64_t));
312AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntInfo, sizeof(uint64_t));
313AssertCompileMemberAlignment(VMXTRANSIENT, fWasGuestDebugStateActive, sizeof(uint64_t));
314AssertCompileMemberSize(VMXTRANSIENT, ExitInstrInfo, sizeof(uint32_t));
315/** Pointer to VMX transient state. */
316typedef VMXTRANSIENT *PVMXTRANSIENT;
317
318/**
319 * Memory operand read or write access.
320 */
321typedef enum VMXMEMACCESS
322{
323 VMXMEMACCESS_READ = 0,
324 VMXMEMACCESS_WRITE = 1
325} VMXMEMACCESS;
326
327/**
328 * VMX VM-exit handler.
329 *
330 * @returns Strict VBox status code (i.e. informational status codes too).
331 * @param pVCpu The cross context virtual CPU structure.
332 * @param pVmxTransient Pointer to the VMX-transient structure.
333 */
334#ifndef HMVMX_USE_FUNCTION_TABLE
335typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
336#else
337typedef DECLCALLBACK(VBOXSTRICTRC) FNVMXEXITHANDLER(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
338/** Pointer to VM-exit handler. */
339typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
340#endif
341
342/**
343 * VMX VM-exit handler, non-strict status code.
344 *
345 * This is generally the same as FNVMXEXITHANDLER, the NSRC bit is just FYI.
346 *
347 * @returns VBox status code, no informational status code returned.
348 * @param pVCpu The cross context virtual CPU structure.
349 * @param pVmxTransient Pointer to the VMX-transient structure.
350 *
351 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
352 * use of that status code will be replaced with VINF_EM_SOMETHING
353 * later when switching over to IEM.
354 */
355#ifndef HMVMX_USE_FUNCTION_TABLE
356typedef int FNVMXEXITHANDLERNSRC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
357#else
358typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
359#endif
360
361
362/*********************************************************************************************************************************
363* Internal Functions *
364*********************************************************************************************************************************/
365static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXTLBFLUSHEPT enmTlbFlush);
366static void hmR0VmxFlushVpid(PVMCPU pVCpu, VMXTLBFLUSHVPID enmTlbFlush, RTGCPTR GCPtr);
367static void hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu);
368static int hmR0VmxImportGuestState(PVMCPU pVCpu, uint64_t fWhat);
369static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, uint64_t u64IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
370 RTGCUINTREG GCPtrFaultAddress, bool fStepping, uint32_t *pfIntrState);
371#if HC_ARCH_BITS == 32
372static int hmR0VmxInitVmcsReadCache(PVMCPU pVCpu);
373#endif
374#ifndef HMVMX_USE_FUNCTION_TABLE
375DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
376# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
377# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
378#else
379# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
380# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
381#endif
382
383/** @name VM-exit handlers.
384 * @{
385 */
386static FNVMXEXITHANDLER hmR0VmxExitXcptOrNmi;
387static FNVMXEXITHANDLER hmR0VmxExitExtInt;
388static FNVMXEXITHANDLER hmR0VmxExitTripleFault;
389static FNVMXEXITHANDLERNSRC hmR0VmxExitInitSignal;
390static FNVMXEXITHANDLERNSRC hmR0VmxExitSipi;
391static FNVMXEXITHANDLERNSRC hmR0VmxExitIoSmi;
392static FNVMXEXITHANDLERNSRC hmR0VmxExitSmi;
393static FNVMXEXITHANDLERNSRC hmR0VmxExitIntWindow;
394static FNVMXEXITHANDLERNSRC hmR0VmxExitNmiWindow;
395static FNVMXEXITHANDLER hmR0VmxExitTaskSwitch;
396static FNVMXEXITHANDLER hmR0VmxExitCpuid;
397static FNVMXEXITHANDLER hmR0VmxExitGetsec;
398static FNVMXEXITHANDLER hmR0VmxExitHlt;
399static FNVMXEXITHANDLERNSRC hmR0VmxExitInvd;
400static FNVMXEXITHANDLER hmR0VmxExitInvlpg;
401static FNVMXEXITHANDLER hmR0VmxExitRdpmc;
402static FNVMXEXITHANDLER hmR0VmxExitVmcall;
403#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
404static FNVMXEXITHANDLER hmR0VmxExitVmclear;
405static FNVMXEXITHANDLER hmR0VmxExitVmlaunch;
406static FNVMXEXITHANDLER hmR0VmxExitVmptrld;
407static FNVMXEXITHANDLER hmR0VmxExitVmptrst;
408static FNVMXEXITHANDLER hmR0VmxExitVmread;
409static FNVMXEXITHANDLER hmR0VmxExitVmresume;
410static FNVMXEXITHANDLER hmR0VmxExitVmwrite;
411static FNVMXEXITHANDLER hmR0VmxExitVmxoff;
412static FNVMXEXITHANDLER hmR0VmxExitVmxon;
413#endif
414static FNVMXEXITHANDLER hmR0VmxExitRdtsc;
415static FNVMXEXITHANDLERNSRC hmR0VmxExitRsm;
416static FNVMXEXITHANDLERNSRC hmR0VmxExitSetPendingXcptUD;
417static FNVMXEXITHANDLER hmR0VmxExitMovCRx;
418static FNVMXEXITHANDLER hmR0VmxExitMovDRx;
419static FNVMXEXITHANDLER hmR0VmxExitIoInstr;
420static FNVMXEXITHANDLER hmR0VmxExitRdmsr;
421static FNVMXEXITHANDLER hmR0VmxExitWrmsr;
422static FNVMXEXITHANDLERNSRC hmR0VmxExitErrInvalidGuestState;
423static FNVMXEXITHANDLERNSRC hmR0VmxExitErrMsrLoad;
424static FNVMXEXITHANDLERNSRC hmR0VmxExitErrUndefined;
425static FNVMXEXITHANDLER hmR0VmxExitMwait;
426static FNVMXEXITHANDLER hmR0VmxExitMtf;
427static FNVMXEXITHANDLER hmR0VmxExitMonitor;
428static FNVMXEXITHANDLER hmR0VmxExitPause;
429static FNVMXEXITHANDLERNSRC hmR0VmxExitErrMachineCheck;
430static FNVMXEXITHANDLERNSRC hmR0VmxExitTprBelowThreshold;
431static FNVMXEXITHANDLER hmR0VmxExitApicAccess;
432static FNVMXEXITHANDLER hmR0VmxExitXdtrAccess;
433static FNVMXEXITHANDLER hmR0VmxExitEptViolation;
434static FNVMXEXITHANDLER hmR0VmxExitEptMisconfig;
435static FNVMXEXITHANDLER hmR0VmxExitRdtscp;
436static FNVMXEXITHANDLER hmR0VmxExitPreemptTimer;
437static FNVMXEXITHANDLERNSRC hmR0VmxExitWbinvd;
438static FNVMXEXITHANDLER hmR0VmxExitXsetbv;
439static FNVMXEXITHANDLER hmR0VmxExitRdrand;
440static FNVMXEXITHANDLER hmR0VmxExitInvpcid;
441/** @} */
442
443static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
444static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
445static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
446static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
447static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
448static int hmR0VmxExitXcptAC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
449static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
450static uint32_t hmR0VmxCheckGuestState(PVMCPU pVCpu);
451
452
453/*********************************************************************************************************************************
454* Global Variables *
455*********************************************************************************************************************************/
456#ifdef HMVMX_USE_FUNCTION_TABLE
457
458/**
459 * VMX_EXIT dispatch table.
460 */
461static const PFNVMXEXITHANDLER g_apfnVMExitHandlers[VMX_EXIT_MAX + 1] =
462{
463 /* 00 VMX_EXIT_XCPT_OR_NMI */ hmR0VmxExitXcptOrNmi,
464 /* 01 VMX_EXIT_EXT_INT */ hmR0VmxExitExtInt,
465 /* 02 VMX_EXIT_TRIPLE_FAULT */ hmR0VmxExitTripleFault,
466 /* 03 VMX_EXIT_INIT_SIGNAL */ hmR0VmxExitInitSignal,
467 /* 04 VMX_EXIT_SIPI */ hmR0VmxExitSipi,
468 /* 05 VMX_EXIT_IO_SMI */ hmR0VmxExitIoSmi,
469 /* 06 VMX_EXIT_SMI */ hmR0VmxExitSmi,
470 /* 07 VMX_EXIT_INT_WINDOW */ hmR0VmxExitIntWindow,
471 /* 08 VMX_EXIT_NMI_WINDOW */ hmR0VmxExitNmiWindow,
472 /* 09 VMX_EXIT_TASK_SWITCH */ hmR0VmxExitTaskSwitch,
473 /* 10 VMX_EXIT_CPUID */ hmR0VmxExitCpuid,
474 /* 11 VMX_EXIT_GETSEC */ hmR0VmxExitGetsec,
475 /* 12 VMX_EXIT_HLT */ hmR0VmxExitHlt,
476 /* 13 VMX_EXIT_INVD */ hmR0VmxExitInvd,
477 /* 14 VMX_EXIT_INVLPG */ hmR0VmxExitInvlpg,
478 /* 15 VMX_EXIT_RDPMC */ hmR0VmxExitRdpmc,
479 /* 16 VMX_EXIT_RDTSC */ hmR0VmxExitRdtsc,
480 /* 17 VMX_EXIT_RSM */ hmR0VmxExitRsm,
481 /* 18 VMX_EXIT_VMCALL */ hmR0VmxExitVmcall,
482#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
483 /* 19 VMX_EXIT_VMCLEAR */ hmR0VmxExitVmclear,
484 /* 20 VMX_EXIT_VMLAUNCH */ hmR0VmxExitVmlaunch,
485 /* 21 VMX_EXIT_VMPTRLD */ hmR0VmxExitVmptrld,
486 /* 22 VMX_EXIT_VMPTRST */ hmR0VmxExitVmptrst,
487 /* 23 VMX_EXIT_VMREAD */ hmR0VmxExitVmread,
488 /* 24 VMX_EXIT_VMRESUME */ hmR0VmxExitVmresume,
489 /* 25 VMX_EXIT_VMWRITE */ hmR0VmxExitVmwrite,
490 /* 26 VMX_EXIT_VMXOFF */ hmR0VmxExitVmxoff,
491 /* 27 VMX_EXIT_VMXON */ hmR0VmxExitVmxon,
492#else
493 /* 19 VMX_EXIT_VMCLEAR */ hmR0VmxExitSetPendingXcptUD,
494 /* 20 VMX_EXIT_VMLAUNCH */ hmR0VmxExitSetPendingXcptUD,
495 /* 21 VMX_EXIT_VMPTRLD */ hmR0VmxExitSetPendingXcptUD,
496 /* 22 VMX_EXIT_VMPTRST */ hmR0VmxExitSetPendingXcptUD,
497 /* 23 VMX_EXIT_VMREAD */ hmR0VmxExitSetPendingXcptUD,
498 /* 24 VMX_EXIT_VMRESUME */ hmR0VmxExitSetPendingXcptUD,
499 /* 25 VMX_EXIT_VMWRITE */ hmR0VmxExitSetPendingXcptUD,
500 /* 26 VMX_EXIT_VMXOFF */ hmR0VmxExitSetPendingXcptUD,
501 /* 27 VMX_EXIT_VMXON */ hmR0VmxExitSetPendingXcptUD,
502#endif
503 /* 28 VMX_EXIT_MOV_CRX */ hmR0VmxExitMovCRx,
504 /* 29 VMX_EXIT_MOV_DRX */ hmR0VmxExitMovDRx,
505 /* 30 VMX_EXIT_IO_INSTR */ hmR0VmxExitIoInstr,
506 /* 31 VMX_EXIT_RDMSR */ hmR0VmxExitRdmsr,
507 /* 32 VMX_EXIT_WRMSR */ hmR0VmxExitWrmsr,
508 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ hmR0VmxExitErrInvalidGuestState,
509 /* 34 VMX_EXIT_ERR_MSR_LOAD */ hmR0VmxExitErrMsrLoad,
510 /* 35 UNDEFINED */ hmR0VmxExitErrUndefined,
511 /* 36 VMX_EXIT_MWAIT */ hmR0VmxExitMwait,
512 /* 37 VMX_EXIT_MTF */ hmR0VmxExitMtf,
513 /* 38 UNDEFINED */ hmR0VmxExitErrUndefined,
514 /* 39 VMX_EXIT_MONITOR */ hmR0VmxExitMonitor,
515 /* 40 VMX_EXIT_PAUSE */ hmR0VmxExitPause,
516 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ hmR0VmxExitErrMachineCheck,
517 /* 42 UNDEFINED */ hmR0VmxExitErrUndefined,
518 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ hmR0VmxExitTprBelowThreshold,
519 /* 44 VMX_EXIT_APIC_ACCESS */ hmR0VmxExitApicAccess,
520 /* 45 UNDEFINED */ hmR0VmxExitErrUndefined,
521 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ hmR0VmxExitXdtrAccess,
522 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ hmR0VmxExitXdtrAccess,
523 /* 48 VMX_EXIT_EPT_VIOLATION */ hmR0VmxExitEptViolation,
524 /* 49 VMX_EXIT_EPT_MISCONFIG */ hmR0VmxExitEptMisconfig,
525 /* 50 VMX_EXIT_INVEPT */ hmR0VmxExitSetPendingXcptUD,
526 /* 51 VMX_EXIT_RDTSCP */ hmR0VmxExitRdtscp,
527 /* 52 VMX_EXIT_PREEMPT_TIMER */ hmR0VmxExitPreemptTimer,
528 /* 53 VMX_EXIT_INVVPID */ hmR0VmxExitSetPendingXcptUD,
529 /* 54 VMX_EXIT_WBINVD */ hmR0VmxExitWbinvd,
530 /* 55 VMX_EXIT_XSETBV */ hmR0VmxExitXsetbv,
531 /* 56 VMX_EXIT_APIC_WRITE */ hmR0VmxExitErrUndefined,
532 /* 57 VMX_EXIT_RDRAND */ hmR0VmxExitRdrand,
533 /* 58 VMX_EXIT_INVPCID */ hmR0VmxExitInvpcid,
534 /* 59 VMX_EXIT_VMFUNC */ hmR0VmxExitSetPendingXcptUD,
535 /* 60 VMX_EXIT_ENCLS */ hmR0VmxExitErrUndefined,
536 /* 61 VMX_EXIT_RDSEED */ hmR0VmxExitErrUndefined, /* only spurious exits, so undefined */
537 /* 62 VMX_EXIT_PML_FULL */ hmR0VmxExitErrUndefined,
538 /* 63 VMX_EXIT_XSAVES */ hmR0VmxExitSetPendingXcptUD,
539 /* 64 VMX_EXIT_XRSTORS */ hmR0VmxExitSetPendingXcptUD,
540};
541#endif /* HMVMX_USE_FUNCTION_TABLE */
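/*
 * Illustrative sketch (assumed dispatcher shape, see hmR0VmxHandleExit) of how the table
 * above is consumed when HMVMX_USE_FUNCTION_TABLE is defined:
 *
 *     AssertMsg(pVmxTransient->uExitReason <= VMX_EXIT_MAX, ("%#x\n", pVmxTransient->uExitReason));
 *     VBOXSTRICTRC rcStrict = g_apfnVMExitHandlers[pVmxTransient->uExitReason](pVCpu, pVmxTransient);
 */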
542
543#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
544static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
545{
546 /* 0 */ "(Not Used)",
547 /* 1 */ "VMCALL executed in VMX root operation.",
548 /* 2 */ "VMCLEAR with invalid physical address.",
549 /* 3 */ "VMCLEAR with VMXON pointer.",
550 /* 4 */ "VMLAUNCH with non-clear VMCS.",
551 /* 5 */ "VMRESUME with non-launched VMCS.",
552 /* 6 */ "VMRESUME after VMXOFF.",
553 /* 7 */ "VM-entry with invalid control fields.",
554 /* 8 */ "VM-entry with invalid host state fields.",
555 /* 9 */ "VMPTRLD with invalid physical address.",
556 /* 10 */ "VMPTRLD with VMXON pointer.",
557 /* 11 */ "VMPTRLD with incorrect revision identifier.",
558 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
559 /* 13 */ "VMWRITE to read-only VMCS component.",
560 /* 14 */ "(Not Used)",
561 /* 15 */ "VMXON executed in VMX root operation.",
562 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
563 /* 17 */ "VM-entry with non-launched executing VMCS.",
564 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
565 /* 19 */ "VMCALL with non-clear VMCS.",
566 /* 20 */ "VMCALL with invalid VM-exit control fields.",
567 /* 21 */ "(Not Used)",
568 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
569 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
570 /* 24 */ "VMCALL with invalid SMM-monitor features.",
571 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
572 /* 26 */ "VM-entry with events blocked by MOV SS.",
573 /* 27 */ "(Not Used)",
574 /* 28 */ "Invalid operand to INVEPT/INVVPID."
575};
576#endif /* VBOX_STRICT && LOG_ENABLED */
577
578
579/**
580 * Updates the VM's last error record.
581 *
582 * If there was a VMX instruction error, reads the error data from the VMCS and
583 * updates VCPU's last error record as well.
584 *
585 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
586 * Can be NULL if @a rc is not VERR_VMX_UNABLE_TO_START_VM or
587 * VERR_VMX_INVALID_VMCS_FIELD.
588 * @param rc The error code.
589 */
590static void hmR0VmxUpdateErrorRecord(PVMCPU pVCpu, int rc)
591{
592 if ( rc == VERR_VMX_INVALID_VMCS_FIELD
593 || rc == VERR_VMX_UNABLE_TO_START_VM)
594 {
595 AssertPtrReturnVoid(pVCpu);
596 VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
597 }
598 pVCpu->CTX_SUFF(pVM)->hm.s.rcInit = rc;
599}
600
601
602/**
603 * Reads the VM-entry interruption-information field from the VMCS into the VMX
604 * transient structure.
605 *
606 * @returns VBox status code.
607 * @param pVmxTransient Pointer to the VMX transient structure.
608 *
609 * @remarks No-long-jump zone!!!
610 */
611DECLINLINE(int) hmR0VmxReadEntryIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
612{
613 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
614 AssertRCReturn(rc, rc);
615 return VINF_SUCCESS;
616}
617
618#ifdef VBOX_STRICT
619/**
620 * Reads the VM-entry exception error code field from the VMCS into
621 * the VMX transient structure.
622 *
623 * @returns VBox status code.
624 * @param pVmxTransient Pointer to the VMX transient structure.
625 *
626 * @remarks No-long-jump zone!!!
627 */
628DECLINLINE(int) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
629{
630 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
631 AssertRCReturn(rc, rc);
632 return VINF_SUCCESS;
633}
634
635
636/**
637 * Reads the VM-entry instruction length field from the VMCS into the VMX
638 * transient structure.
639 *
640 * @returns VBox status code.
641 * @param pVmxTransient Pointer to the VMX transient structure.
642 *
643 * @remarks No-long-jump zone!!!
644 */
645DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
646{
647 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
648 AssertRCReturn(rc, rc);
649 return VINF_SUCCESS;
650}
651#endif /* VBOX_STRICT */
652
653
654/**
655 * Reads the VM-exit interruption-information field from the VMCS into the VMX
656 * transient structure.
657 *
658 * @returns VBox status code.
659 * @param pVmxTransient Pointer to the VMX transient structure.
660 */
661DECLINLINE(int) hmR0VmxReadExitIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
662{
663 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
664 {
665 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
666 AssertRCReturn(rc, rc);
667 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO;
668 }
669 return VINF_SUCCESS;
670}
671
672
673/**
674 * Reads the VM-exit interruption error code from the VMCS into the VMX
675 * transient structure.
676 *
677 * @returns VBox status code.
678 * @param pVmxTransient Pointer to the VMX transient structure.
679 */
680DECLINLINE(int) hmR0VmxReadExitIntErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
681{
682 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
683 {
684 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
685 AssertRCReturn(rc, rc);
686 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE;
687 }
688 return VINF_SUCCESS;
689}
690
691
692/**
693 * Reads the VM-exit instruction length field from the VMCS into the VMX
694 * transient structure.
695 *
696 * @returns VBox status code.
697 * @param pVmxTransient Pointer to the VMX transient structure.
698 */
699DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
700{
701 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
702 {
703 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
704 AssertRCReturn(rc, rc);
705 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
706 }
707 return VINF_SUCCESS;
708}
709
710
711/**
712 * Reads the VM-exit instruction-information field from the VMCS into
713 * the VMX transient structure.
714 *
715 * @returns VBox status code.
716 * @param pVmxTransient Pointer to the VMX transient structure.
717 */
718DECLINLINE(int) hmR0VmxReadExitInstrInfoVmcs(PVMXTRANSIENT pVmxTransient)
719{
720 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
721 {
722 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
723 AssertRCReturn(rc, rc);
724 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO;
725 }
726 return VINF_SUCCESS;
727}
728
729
730/**
731 * Reads the VM-exit Qualification from the VMCS into the VMX transient structure.
732 *
733 * @returns VBox status code.
734 * @param pVCpu The cross context virtual CPU structure of the
735 * calling EMT. (Required for the VMCS cache case.)
736 * @param pVmxTransient Pointer to the VMX transient structure.
737 */
738DECLINLINE(int) hmR0VmxReadExitQualVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
739{
740 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
741 {
742 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual); NOREF(pVCpu);
743 AssertRCReturn(rc, rc);
744 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
745 }
746 return VINF_SUCCESS;
747}
748
749
750/**
751 * Reads the Guest-linear address from the VMCS into the VMX transient structure.
752 *
753 * @returns VBox status code.
754 * @param pVCpu The cross context virtual CPU structure of the
755 * calling EMT. (Required for the VMCS cache case.)
756 * @param pVmxTransient Pointer to the VMX transient structure.
757 */
758DECLINLINE(int) hmR0VmxReadGuestLinearAddrVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
759{
760 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
761 {
762 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr); NOREF(pVCpu);
763 AssertRCReturn(rc, rc);
764 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_LINEAR_ADDR;
765 }
766 return VINF_SUCCESS;
767}
768
769
770/**
771 * Reads the IDT-vectoring information field from the VMCS into the VMX
772 * transient structure.
773 *
774 * @returns VBox status code.
775 * @param pVmxTransient Pointer to the VMX transient structure.
776 *
777 * @remarks No-long-jump zone!!!
778 */
779DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
780{
781 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
782 {
783 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
784 AssertRCReturn(rc, rc);
785 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO;
786 }
787 return VINF_SUCCESS;
788}
789
790
791/**
792 * Reads the IDT-vectoring error code from the VMCS into the VMX
793 * transient structure.
794 *
795 * @returns VBox status code.
796 * @param pVmxTransient Pointer to the VMX transient structure.
797 */
798DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
799{
800 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
801 {
802 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
803 AssertRCReturn(rc, rc);
804 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE;
805 }
806 return VINF_SUCCESS;
807}
808
809
810/**
811 * Enters VMX root mode operation on the current CPU.
812 *
813 * @returns VBox status code.
814 * @param pVM The cross context VM structure. Can be
815 * NULL, after a resume.
816 * @param HCPhysCpuPage Physical address of the VMXON region.
817 * @param pvCpuPage Pointer to the VMXON region.
818 */
819static int hmR0VmxEnterRootMode(PVM pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
820{
821 Assert(HCPhysCpuPage && HCPhysCpuPage != NIL_RTHCPHYS);
822 Assert(RT_ALIGN_T(HCPhysCpuPage, _4K, RTHCPHYS) == HCPhysCpuPage);
823 Assert(pvCpuPage);
824 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
825
826 if (pVM)
827 {
828 /* Write the VMCS revision dword to the VMXON region. */
829 *(uint32_t *)pvCpuPage = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_ID);
830 }
831
832 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
833 RTCCUINTREG fEFlags = ASMIntDisableFlags();
834
835 /* Enable the VMX bit in CR4 if necessary. */
836 RTCCUINTREG uOldCr4 = SUPR0ChangeCR4(X86_CR4_VMXE, RTCCUINTREG_MAX);
837
838 /* Enter VMX root mode. */
839 int rc = VMXEnable(HCPhysCpuPage);
840 if (RT_FAILURE(rc))
841 {
842 if (!(uOldCr4 & X86_CR4_VMXE))
843 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
844
845 if (pVM)
846 pVM->hm.s.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
847 }
848
849 /* Restore interrupts. */
850 ASMSetFlags(fEFlags);
851 return rc;
852}
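/*
 * Note (descriptive summary, no new behaviour): a successful hmR0VmxEnterRootMode() on a
 * CPU is balanced by hmR0VmxLeaveRootMode() on that same CPU, both called with preemption
 * disabled; see VMXR0EnableCpu() and VMXR0DisableCpu() below.
 */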
853
854
855/**
856 * Exits VMX root mode operation on the current CPU.
857 *
858 * @returns VBox status code.
859 */
860static int hmR0VmxLeaveRootMode(void)
861{
862 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
863
864 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
865 RTCCUINTREG fEFlags = ASMIntDisableFlags();
866
867 /* If we're for some reason not in VMX root mode, then don't leave it. */
868 RTCCUINTREG uHostCR4 = ASMGetCR4();
869
870 int rc;
871 if (uHostCR4 & X86_CR4_VMXE)
872 {
873 /* Exit VMX root mode and clear the VMX bit in CR4. */
874 VMXDisable();
875 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
876 rc = VINF_SUCCESS;
877 }
878 else
879 rc = VERR_VMX_NOT_IN_VMX_ROOT_MODE;
880
881 /* Restore interrupts. */
882 ASMSetFlags(fEFlags);
883 return rc;
884}
885
886
887/**
888 * Allocates and maps one physically contiguous page. The allocated page is
889 * zero'd out. (Used by various VT-x structures).
890 *
891 * @returns IPRT status code.
892 * @param pMemObj Pointer to the ring-0 memory object.
893 * @param ppVirt Where to store the virtual address of the
894 * allocation.
895 * @param pHCPhys Where to store the physical address of the
896 * allocation.
897 */
898static int hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
899{
900 AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER);
901 AssertPtrReturn(ppVirt, VERR_INVALID_PARAMETER);
902 AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
903
904 int rc = RTR0MemObjAllocCont(pMemObj, PAGE_SIZE, false /* fExecutable */);
905 if (RT_FAILURE(rc))
906 return rc;
907 *ppVirt = RTR0MemObjAddress(*pMemObj);
908 *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
909 ASMMemZero32(*ppVirt, PAGE_SIZE);
910 return VINF_SUCCESS;
911}
912
913
914/**
915 * Frees and unmaps an allocated physical page.
916 *
917 * @param pMemObj Pointer to the ring-0 memory object.
918 * @param ppVirt Where to re-initialize the virtual address of the
919 * allocation as 0.
920 * @param pHCPhys Where to re-initialize the physical address of the
921 * allocation as 0.
922 */
923static void hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
924{
925 AssertPtr(pMemObj);
926 AssertPtr(ppVirt);
927 AssertPtr(pHCPhys);
928 if (*pMemObj != NIL_RTR0MEMOBJ)
929 {
930 int rc = RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
931 AssertRC(rc);
932 *pMemObj = NIL_RTR0MEMOBJ;
933 *ppVirt = 0;
934 *pHCPhys = 0;
935 }
936}
937
938
939/**
940 * Worker function to free VT-x related structures.
941 *
942 * @returns IPRT status code.
943 * @param pVM The cross context VM structure.
944 */
945static void hmR0VmxStructsFree(PVM pVM)
946{
947 for (VMCPUID i = 0; i < pVM->cCpus; i++)
948 {
949 PVMCPU pVCpu = &pVM->aCpus[i];
950 AssertPtr(pVCpu);
951
952 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
953 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
954
955 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
956 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
957
958 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
959 }
960
961 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
962#ifdef VBOX_WITH_CRASHDUMP_MAGIC
963 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
964#endif
965}
966
967
968/**
969 * Worker function to allocate VT-x related VM structures.
970 *
971 * @returns IPRT status code.
972 * @param pVM The cross context VM structure.
973 */
974static int hmR0VmxStructsAlloc(PVM pVM)
975{
976 /*
977 * Initialize members up-front so we can cleanup properly on allocation failure.
978 */
979#define VMXLOCAL_INIT_VM_MEMOBJ(a_Name, a_VirtPrefix) \
980 pVM->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
981 pVM->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
982 pVM->hm.s.vmx.HCPhys##a_Name = 0;
983
984#define VMXLOCAL_INIT_VMCPU_MEMOBJ(a_Name, a_VirtPrefix) \
985 pVCpu->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
986 pVCpu->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
987 pVCpu->hm.s.vmx.HCPhys##a_Name = 0;
988
989#ifdef VBOX_WITH_CRASHDUMP_MAGIC
990 VMXLOCAL_INIT_VM_MEMOBJ(Scratch, pv);
991#endif
992 VMXLOCAL_INIT_VM_MEMOBJ(ApicAccess, pb);
993
994 AssertCompile(sizeof(VMCPUID) == sizeof(pVM->cCpus));
995 for (VMCPUID i = 0; i < pVM->cCpus; i++)
996 {
997 PVMCPU pVCpu = &pVM->aCpus[i];
998 VMXLOCAL_INIT_VMCPU_MEMOBJ(Vmcs, pv);
999 VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
1000 VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
1001 VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
1002 }
1003#undef VMXLOCAL_INIT_VMCPU_MEMOBJ
1004#undef VMXLOCAL_INIT_VM_MEMOBJ
1005
1006 /* The VMCS size cannot be more than 4096 bytes. See Intel spec. Appendix A.1 "Basic VMX Information". */
1007 AssertReturnStmt(RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_SIZE) <= PAGE_SIZE,
1008 (&pVM->aCpus[0])->hm.s.u32HMError = VMX_UFC_INVALID_VMCS_SIZE,
1009 VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO);
1010
1011 /*
1012 * Allocate all the VT-x structures.
1013 */
1014 int rc = VINF_SUCCESS;
1015#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1016 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
1017 if (RT_FAILURE(rc))
1018 goto cleanup;
1019 strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
1020 *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xdeadbeefdeadbeef);
1021#endif
1022
1023 /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
1024 if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
1025 {
1026 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
1027 &pVM->hm.s.vmx.HCPhysApicAccess);
1028 if (RT_FAILURE(rc))
1029 goto cleanup;
1030 }
1031
1032 /*
1033 * Initialize per-VCPU VT-x structures.
1034 */
1035 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1036 {
1037 PVMCPU pVCpu = &pVM->aCpus[i];
1038 AssertPtr(pVCpu);
1039
1040 /* Allocate the VM control structure (VMCS). */
1041 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
1042 if (RT_FAILURE(rc))
1043 goto cleanup;
1044
1045 /* Get the allocated virtual-APIC page from the APIC device for transparent TPR accesses. */
1046 if ( PDMHasApic(pVM)
1047 && (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW))
1048 {
1049 rc = APICGetApicPageForCpu(pVCpu, &pVCpu->hm.s.vmx.HCPhysVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
1050 NULL /* pR3Ptr */, NULL /* pRCPtr */);
1051 if (RT_FAILURE(rc))
1052 goto cleanup;
1053 }
1054
1055 /*
1056 * Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for
1057 * transparent accesses of specific MSRs.
1058 *
1059 * If the condition for enabling MSR bitmaps changes here, don't forget to
1060 * update HMAreMsrBitmapsAvailable().
1061 */
1062 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
1063 {
1064 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
1065 &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1066 if (RT_FAILURE(rc))
1067 goto cleanup;
1068 ASMMemFill32(pVCpu->hm.s.vmx.pvMsrBitmap, PAGE_SIZE, UINT32_C(0xffffffff));
1069 }
1070
1071 /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
1072 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
1073 if (RT_FAILURE(rc))
1074 goto cleanup;
1075
1076 /* Allocate the VM-exit MSR-load page for the host MSRs. */
1077 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
1078 if (RT_FAILURE(rc))
1079 goto cleanup;
1080 }
1081
1082 return VINF_SUCCESS;
1083
1084cleanup:
1085 hmR0VmxStructsFree(pVM);
1086 return rc;
1087}
1088
1089
1090/**
1091 * Does global VT-x initialization (called during module initialization).
1092 *
1093 * @returns VBox status code.
1094 */
1095VMMR0DECL(int) VMXR0GlobalInit(void)
1096{
1097#ifdef HMVMX_USE_FUNCTION_TABLE
1098 AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers));
1099# ifdef VBOX_STRICT
1100 for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++)
1101 Assert(g_apfnVMExitHandlers[i]);
1102# endif
1103#endif
1104 return VINF_SUCCESS;
1105}
1106
1107
1108/**
1109 * Does global VT-x termination (called during module termination).
1110 */
1111VMMR0DECL(void) VMXR0GlobalTerm()
1112{
1113 /* Nothing to do currently. */
1114}
1115
1116
1117/**
1118 * Sets up and activates VT-x on the current CPU.
1119 *
1120 * @returns VBox status code.
1121 * @param pHostCpu The HM physical-CPU structure.
1122 * @param pVM The cross context VM structure. Can be
1123 * NULL after a host resume operation.
1124 * @param pvCpuPage Pointer to the VMXON region (can be NULL if @a
1125 * fEnabledByHost is @c true).
1126 * @param HCPhysCpuPage Physical address of the VMXON region (can be 0 if
1127 * @a fEnabledByHost is @c true).
1128 * @param fEnabledByHost Set if SUPR0EnableVTx() or similar was used to
1129 * enable VT-x on the host.
1130 * @param pHwvirtMsrs Pointer to the hardware-virtualization MSRs.
1131 */
1132VMMR0DECL(int) VMXR0EnableCpu(PHMPHYSCPU pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
1133 PCSUPHWVIRTMSRS pHwvirtMsrs)
1134{
1135 Assert(pHostCpu);
1136 Assert(pHwvirtMsrs);
1137 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1138
1139 /* Enable VT-x if it's not already enabled by the host. */
1140 if (!fEnabledByHost)
1141 {
1142 int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
1143 if (RT_FAILURE(rc))
1144 return rc;
1145 }
1146
1147 /*
1148 * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor has been
1149 * using EPTPs) so we don't retain any stale guest-physical mappings which won't get
1150 * invalidated when flushing by VPID.
1151 */
1152 if (pHwvirtMsrs->u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
1153 {
1154 hmR0VmxFlushEpt(NULL /* pVCpu */, VMXTLBFLUSHEPT_ALL_CONTEXTS);
1155 pHostCpu->fFlushAsidBeforeUse = false;
1156 }
1157 else
1158 pHostCpu->fFlushAsidBeforeUse = true;
1159
1160 /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
1161 ++pHostCpu->cTlbFlushes;
1162
1163 return VINF_SUCCESS;
1164}
1165
1166
1167/**
1168 * Deactivates VT-x on the current CPU.
1169 *
1170 * @returns VBox status code.
1171 * @param pvCpuPage Pointer to the VMXON region.
1172 * @param HCPhysCpuPage Physical address of the VMXON region.
1173 *
1174 * @remarks This function should never be called when SUPR0EnableVTx() or
1175 * similar was used to enable VT-x on the host.
1176 */
1177VMMR0DECL(int) VMXR0DisableCpu(void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
1178{
1179 RT_NOREF2(pvCpuPage, HCPhysCpuPage);
1180
1181 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1182 return hmR0VmxLeaveRootMode();
1183}
1184
1185
1186/**
1187 * Sets the permission bits for the specified MSR in the MSR bitmap.
1188 *
1189 * @param pVCpu The cross context virtual CPU structure.
1190 * @param uMsr The MSR value.
1191 * @param enmRead Whether reading this MSR causes a VM-exit.
1192 * @param enmWrite Whether writing this MSR causes a VM-exit.
1193 */
1194static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
1195{
1196 int32_t iBit;
1197 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
1198
1199 /*
1200 * MSR Layout:
1201 * Byte index MSR range Interpreted as
1202 * 0x000 - 0x3ff 0x00000000 - 0x00001fff Low MSR read bits.
1203 * 0x400 - 0x7ff 0xc0000000 - 0xc0001fff High MSR read bits.
1204 * 0x800 - 0xbff 0x00000000 - 0x00001fff Low MSR write bits.
1205 * 0xc00 - 0xfff 0xc0000000 - 0xc0001fff High MSR write bits.
1206 *
1207 * A bit corresponding to an MSR within the above range causes a VM-exit
1208 * if the bit is 1 on executions of RDMSR/WRMSR.
1209 *
1210 * If an MSR falls outside these ranges, it always causes a VM-exit.
1211 *
1212 * See Intel spec. 24.6.9 "MSR-Bitmap Address".
1213 */
1214 if (uMsr <= 0x00001fff)
1215 iBit = uMsr;
1216 else if (uMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
1217 {
1218 iBit = uMsr - UINT32_C(0xc0000000);
1219 pbMsrBitmap += 0x400;
1220 }
1221 else
1222 AssertMsgFailedReturnVoid(("hmR0VmxSetMsrPermission: Invalid MSR %#RX32\n", uMsr));
1223
1224 Assert(iBit <= 0x1fff);
1225 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
1226 ASMBitSet(pbMsrBitmap, iBit);
1227 else
1228 ASMBitClear(pbMsrBitmap, iBit);
1229
1230 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
1231 ASMBitSet(pbMsrBitmap + 0x800, iBit);
1232 else
1233 ASMBitClear(pbMsrBitmap + 0x800, iBit);
1234}
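/*
 * Worked example of the bitmap layout documented above (illustrative only): for the high
 * MSR MSR_K8_LSTAR (0xc0000082), iBit = 0x82 and pbMsrBitmap is advanced by 0x400, so the
 * read-intercept bit lives at byte 0x400 + 0x82/8 = 0x410 and the corresponding
 * write-intercept bit another 0x800 bytes further on, at byte 0xc10.
 */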
1235
1236
1237/**
1238 * Updates the VMCS with the number of effective MSRs in the auto-load/store MSR
1239 * area.
1240 *
1241 * @returns VBox status code.
1242 * @param pVCpu The cross context virtual CPU structure.
1243 * @param cMsrs The number of MSRs.
1244 */
1245static int hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, uint32_t cMsrs)
1246{
1247 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
1248 uint64_t const uVmxMiscMsr = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc;
1249 uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(uVmxMiscMsr);
1250 if (RT_UNLIKELY(cMsrs > cMaxSupportedMsrs))
1251 {
1252 LogRel(("CPU auto-load/store MSR count in VMCS exceeded cMsrs=%u Supported=%u.\n", cMsrs, cMaxSupportedMsrs));
1253 pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
1254 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1255 }
1256
1257 /* Update number of guest MSRs to load/store across the world-switch. */
1258 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs);
1259 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs);
1260
1261 /* Update number of host MSRs to load after the world-switch. Identical to guest-MSR count as it's always paired. */
1262 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cMsrs);
1263 AssertRCReturn(rc, rc);
1264
1265 /* Update the VCPU's copy of the MSR count. */
1266 pVCpu->hm.s.vmx.cMsrs = cMsrs;
1267
1268 return VINF_SUCCESS;
1269}
1270
1271
1272/**
1273 * Adds a new (or updates the value of an existing) guest/host MSR
1274 * pair to be swapped during the world-switch as part of the
1275 * auto-load/store MSR area in the VMCS.
1276 *
1277 * @returns VBox status code.
1278 * @param pVCpu The cross context virtual CPU structure.
1279 * @param uMsr The MSR.
1280 * @param uGuestMsrValue Value of the guest MSR.
1281 * @param fUpdateHostMsr Whether to update the value of the host MSR if
1282 * necessary.
1283 * @param pfAddedAndUpdated Where to store whether the MSR was added -and-
1284 * its value was updated. Optional, can be NULL.
1285 */
1286static int hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr, uint64_t uGuestMsrValue, bool fUpdateHostMsr,
1287 bool *pfAddedAndUpdated)
1288{
1289 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1290 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1291 uint32_t i;
1292 for (i = 0; i < cMsrs; i++)
1293 {
1294 if (pGuestMsr->u32Msr == uMsr)
1295 break;
1296 pGuestMsr++;
1297 }
1298
1299 bool fAdded = false;
1300 if (i == cMsrs)
1301 {
1302 ++cMsrs;
1303 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
1304 AssertMsgRCReturn(rc, ("hmR0VmxAddAutoLoadStoreMsr: Insufficient space to add MSR %u\n", uMsr), rc);
1305
1306 /* Now that we're swapping MSRs during the world-switch, allow the guest to read/write them without causing VM-exits. */
1307 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
1308 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1309
1310 fAdded = true;
1311 }
1312
1313 /* Update the MSR values in the auto-load/store MSR area. */
1314 pGuestMsr->u32Msr = uMsr;
1315 pGuestMsr->u64Value = uGuestMsrValue;
1316
1317 /* Create/update the MSR slot in the host MSR area. */
1318 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1319 pHostMsr += i;
1320 pHostMsr->u32Msr = uMsr;
1321
1322 /*
1323 * Update the host MSR only when requested by the caller AND when we're
1324 * adding it to the auto-load/store area. Otherwise, it would have been
1325 * updated by hmR0VmxExportHostMsrs(). We do this for performance reasons.
1326 */
1327 bool fUpdatedMsrValue = false;
1328 if ( fAdded
1329 && fUpdateHostMsr)
1330 {
1331 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1332 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1333 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
1334 fUpdatedMsrValue = true;
1335 }
1336
1337 if (pfAddedAndUpdated)
1338 *pfAddedAndUpdated = fUpdatedMsrValue;
1339 return VINF_SUCCESS;
1340}
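/*
 * Illustrative usage sketch (argument values assumed, not taken from a specific caller):
 * swapping the guest's TSC_AUX MSR for RDTSCP across the world-switch, passing
 * fUpdateHostMsr=true so the host value is refreshed as well:
 *
 *     bool fAddedAndUpdated;
 *     int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, u64GuestTscAux,
 *                                         true, &fAddedAndUpdated);
 *     AssertRC(rc);
 */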
1341
1342
1343/**
1344 * Removes a guest/host MSR pair to be swapped during the world-switch from the
1345 * auto-load/store MSR area in the VMCS.
1346 *
1347 * @returns VBox status code.
1348 * @param pVCpu The cross context virtual CPU structure.
1349 * @param uMsr The MSR.
1350 */
1351static int hmR0VmxRemoveAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr)
1352{
1353 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1354 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1355 for (uint32_t i = 0; i < cMsrs; i++)
1356 {
1357 /* Find the MSR. */
1358 if (pGuestMsr->u32Msr == uMsr)
1359 {
1360 /* If it's the last MSR, simply reduce the count. */
1361 if (i == cMsrs - 1)
1362 {
1363 --cMsrs;
1364 break;
1365 }
1366
1367 /* Remove it by swapping the last MSR in place of it, and reducing the count. */
1368 PVMXAUTOMSR pLastGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1369 pLastGuestMsr += cMsrs - 1;
1370 pGuestMsr->u32Msr = pLastGuestMsr->u32Msr;
1371 pGuestMsr->u64Value = pLastGuestMsr->u64Value;
1372
1373 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1374 PVMXAUTOMSR pLastHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1375 pLastHostMsr += cMsrs - 1;
1376 pHostMsr->u32Msr = pLastHostMsr->u32Msr;
1377 pHostMsr->u64Value = pLastHostMsr->u64Value;
1378 --cMsrs;
1379 break;
1380 }
1381 pGuestMsr++;
1382 }
1383
1384 /* Update the VMCS if the count changed (meaning the MSR was found). */
1385 if (cMsrs != pVCpu->hm.s.vmx.cMsrs)
1386 {
1387 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
1388 AssertRCReturn(rc, rc);
1389
1390 /* We're no longer swapping MSRs during the world-switch, intercept guest read/writes to them. */
1391 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
1392 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
1393
1394 Log4Func(("Removed MSR %#RX32 new cMsrs=%u\n", uMsr, pVCpu->hm.s.vmx.cMsrs));
1395 return VINF_SUCCESS;
1396 }
1397
1398 return VERR_NOT_FOUND;
1399}
1400
1401
1402/**
1403 * Checks if the specified guest MSR is part of the auto-load/store area in
1404 * the VMCS.
1405 *
1406 * @returns true if found, false otherwise.
1407 * @param pVCpu The cross context virtual CPU structure.
1408 * @param uMsr The MSR to find.
1409 */
1410static bool hmR0VmxIsAutoLoadStoreGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
1411{
1412 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1413 uint32_t const cMsrs = pVCpu->hm.s.vmx.cMsrs;
1414
1415 for (uint32_t i = 0; i < cMsrs; i++, pGuestMsr++)
1416 {
1417 if (pGuestMsr->u32Msr == uMsr)
1418 return true;
1419 }
1420 return false;
1421}
1422
1423
1424/**
1425 * Updates the value of all host MSRs in the auto-load/store area in the VMCS.
1426 *
1427 * @param pVCpu The cross context virtual CPU structure.
1428 *
1429 * @remarks No-long-jump zone!!!
1430 */
1431static void hmR0VmxUpdateAutoLoadStoreHostMsrs(PVMCPU pVCpu)
1432{
1433 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1434 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1435 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1436 uint32_t const cMsrs = pVCpu->hm.s.vmx.cMsrs;
1437
1438 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
1439 {
1440 AssertReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr);
1441
1442 /*
1443 * Performance hack for the host EFER MSR. We use the cached value rather than re-read it.
1444 * Strict builds will catch mismatches in hmR0VmxCheckAutoLoadStoreMsrs(). See @bugref{7368}.
1445 */
1446 if (pHostMsr->u32Msr == MSR_K6_EFER)
1447 pHostMsr->u64Value = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostEfer;
1448 else
1449 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
1450 }
1451
1452 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
1453}
1454
1455
1456/**
1457 * Saves a set of host MSRs to allow read/write passthru access to the guest and
1458 * perform lazy restoration of the host MSRs while leaving VT-x.
1459 *
1460 * @param pVCpu The cross context virtual CPU structure.
1461 *
1462 * @remarks No-long-jump zone!!!
1463 */
1464static void hmR0VmxLazySaveHostMsrs(PVMCPU pVCpu)
1465{
1466 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1467
1468 /*
1469 * Note: If you're adding MSRs here, make sure to update the MSR-bitmap permissions in hmR0VmxSetupProcCtls().
1470 */
1471 if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST))
1472 {
1473 Assert(!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)); /* Guest MSRs better not be loaded now. */
1474#if HC_ARCH_BITS == 64
1475 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
1476 {
1477 pVCpu->hm.s.vmx.u64HostLStarMsr = ASMRdMsr(MSR_K8_LSTAR);
1478 pVCpu->hm.s.vmx.u64HostStarMsr = ASMRdMsr(MSR_K6_STAR);
1479 pVCpu->hm.s.vmx.u64HostSFMaskMsr = ASMRdMsr(MSR_K8_SF_MASK);
1480 pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
1481 }
1482#endif
1483 pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_SAVED_HOST;
1484 }
1485}
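/*
 * Summary of the lazy-MSR life cycle implemented by this group of functions (descriptive
 * only, no new behaviour):
 *     hmR0VmxLazySaveHostMsrs()    - snapshot the host LSTAR/STAR/SF_MASK/KERNEL_GS_BASE values.
 *     hmR0VmxLazyLoadGuestMsrs()   - write the guest values to the CPU before running the guest.
 *     hmR0VmxLazyRestoreHostMsrs() - write the saved host values back when leaving VT-x, if the
 *                                    guest values were actually loaded.
 */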
1486
1487
1488/**
1489 * Checks whether the MSR belongs to the set of guest MSRs that we restore
1490 * lazily while leaving VT-x.
1491 *
1492 * @returns true if it does, false otherwise.
1493 * @param pVCpu The cross context virtual CPU structure.
1494 * @param uMsr The MSR to check.
1495 */
1496static bool hmR0VmxIsLazyGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
1497{
1498 NOREF(pVCpu);
1499#if HC_ARCH_BITS == 64
1500 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
1501 {
1502 switch (uMsr)
1503 {
1504 case MSR_K8_LSTAR:
1505 case MSR_K6_STAR:
1506 case MSR_K8_SF_MASK:
1507 case MSR_K8_KERNEL_GS_BASE:
1508 return true;
1509 }
1510 }
1511#else
1512 RT_NOREF(pVCpu, uMsr);
1513#endif
1514 return false;
1515}
1516
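/*
 * Illustrative sketch: hmR0VmxIsLazyGuestMsr() is the predicate an MSR handler can use to
 * tell the lazily swapped MSRs (LSTAR, STAR, SF_MASK, KERNEL_GS_BASE on 64-bit hosts with
 * 64-bit guests) apart from everything else. The wrapper name is hypothetical.
 */
#if 0
static bool hmR0VmxSketchIsMsrHandledLazily(PVMCPU pVCpu, uint32_t idMsr)
{
    /* For lazy MSRs the guest value lives in the guest-CPU context and is reconciled with
       the real CPU registers by hmR0VmxLazyLoadGuestMsrs/hmR0VmxLazyRestoreHostMsrs. */
    return hmR0VmxIsLazyGuestMsr(pVCpu, idMsr);
}
#endif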
1517
1518/**
1519 * Loads a set of guest MSRs to allow read/write passthru to the guest.
1520 *
1521 * The name of this function is slightly confusing. This function does NOT
1522 * postpone loading, but loads the MSRs right now. "hmR0VmxLazy" is simply a
1523 * common prefix for functions dealing with "lazy restoration" of the shared
1524 * MSRs.
1525 *
1526 * @param pVCpu The cross context virtual CPU structure.
1527 *
1528 * @remarks No-long-jump zone!!!
1529 */
1530static void hmR0VmxLazyLoadGuestMsrs(PVMCPU pVCpu)
1531{
1532 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1533 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1534
1535 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1536#if HC_ARCH_BITS == 64
1537 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
1538 {
1539 /*
1540 * If the guest MSRs are not loaded -and- if all the guest MSRs are identical
1541 * to the MSRs on the CPU (which are the saved host MSRs, see assertion above) then
1542 * we can skip a few MSR writes.
1543 *
1544         * Otherwise, either 1. they're not loaded but their values in the guest-CPU context
1545         * differ from what's on the CPU, or 2. they're loaded but the guest MSR values in the
1546         * guest-CPU context might be different from what's currently loaded in the CPU. In
1547         * either case, we need to write the new guest MSR values to the CPU, see @bugref{8728}.
1548 */
1549 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1550 if ( !(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
1551 && pCtx->msrKERNELGSBASE == pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr
1552 && pCtx->msrLSTAR == pVCpu->hm.s.vmx.u64HostLStarMsr
1553 && pCtx->msrSTAR == pVCpu->hm.s.vmx.u64HostStarMsr
1554 && pCtx->msrSFMASK == pVCpu->hm.s.vmx.u64HostSFMaskMsr)
1555 {
1556#ifdef VBOX_STRICT
1557 Assert(ASMRdMsr(MSR_K8_KERNEL_GS_BASE) == pCtx->msrKERNELGSBASE);
1558 Assert(ASMRdMsr(MSR_K8_LSTAR) == pCtx->msrLSTAR);
1559 Assert(ASMRdMsr(MSR_K6_STAR) == pCtx->msrSTAR);
1560 Assert(ASMRdMsr(MSR_K8_SF_MASK) == pCtx->msrSFMASK);
1561#endif
1562 }
1563 else
1564 {
1565 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE);
1566 ASMWrMsr(MSR_K8_LSTAR, pCtx->msrLSTAR);
1567 ASMWrMsr(MSR_K6_STAR, pCtx->msrSTAR);
1568 ASMWrMsr(MSR_K8_SF_MASK, pCtx->msrSFMASK);
1569 }
1570 }
1571#endif
1572 pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_LOADED_GUEST;
1573}
1574
1575
1576/**
1577 * Performs lazy restoration of the set of host MSRs if they were previously
1578 * loaded with guest MSR values.
1579 *
1580 * @param pVCpu The cross context virtual CPU structure.
1581 *
1582 * @remarks No-long-jump zone!!!
1583 * @remarks The guest MSRs should have been saved back into the guest-CPU
1584 * context by hmR0VmxImportGuestState()!!!
1585 */
1586static void hmR0VmxLazyRestoreHostMsrs(PVMCPU pVCpu)
1587{
1588 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1589 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1590
1591 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
1592 {
1593 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1594#if HC_ARCH_BITS == 64
1595 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
1596 {
1597 ASMWrMsr(MSR_K8_LSTAR, pVCpu->hm.s.vmx.u64HostLStarMsr);
1598 ASMWrMsr(MSR_K6_STAR, pVCpu->hm.s.vmx.u64HostStarMsr);
1599 ASMWrMsr(MSR_K8_SF_MASK, pVCpu->hm.s.vmx.u64HostSFMaskMsr);
1600 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr);
1601 }
1602#endif
1603 }
1604 pVCpu->hm.s.vmx.fLazyMsrs &= ~(VMX_LAZY_MSRS_LOADED_GUEST | VMX_LAZY_MSRS_SAVED_HOST);
1605}
1606
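/*
 * Illustrative sketch of the lazy MSR life cycle implemented by the functions above:
 * save the host values once, load the guest values before running guest code, and
 * restore the host values when leaving VT-x. The wrapper below is hypothetical and
 * glosses over the preemption-disabled/no-longjmp rules the real callers must respect.
 */
#if 0
static void hmR0VmxSketchLazyMsrRoundTrip(PVMCPU pVCpu)
{
    hmR0VmxLazySaveHostMsrs(pVCpu);     /* Sets VMX_LAZY_MSRS_SAVED_HOST. */
    hmR0VmxLazyLoadGuestMsrs(pVCpu);    /* Sets VMX_LAZY_MSRS_LOADED_GUEST; writes guest values to the CPU if needed. */
    /* ... execute guest code ... */
    hmR0VmxLazyRestoreHostMsrs(pVCpu);  /* Restores the host values and clears both flags. */
}
#endif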
1607
1608/**
1609 * Verifies that our cached values of the VMCS fields are all consistent with
1610 * what's actually present in the VMCS.
1611 *
1612 * @returns VBox status code.
1613 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1614 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1615 * VMCS content. HMCPU error-field is
1616 * updated, see VMX_VCI_XXX.
1617 * @param pVCpu The cross context virtual CPU structure.
1618 */
1619static int hmR0VmxCheckVmcsCtls(PVMCPU pVCpu)
1620{
1621 uint32_t u32Val;
1622 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
1623 AssertRCReturn(rc, rc);
1624 AssertMsgReturnStmt(pVCpu->hm.s.vmx.Ctls.u32EntryCtls == u32Val,
1625 ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.Ctls.u32EntryCtls, u32Val),
1626 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_ENTRY,
1627 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1628
1629 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val);
1630 AssertRCReturn(rc, rc);
1631 AssertMsgReturnStmt(pVCpu->hm.s.vmx.Ctls.u32ExitCtls == u32Val,
1632 ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.Ctls.u32ExitCtls, u32Val),
1633 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_EXIT,
1634 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1635
1636 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1637 AssertRCReturn(rc, rc);
1638 AssertMsgReturnStmt(pVCpu->hm.s.vmx.Ctls.u32PinCtls == u32Val,
1639 ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.Ctls.u32PinCtls, u32Val),
1640 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1641 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1642
1643 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1644 AssertRCReturn(rc, rc);
1645 AssertMsgReturnStmt(pVCpu->hm.s.vmx.Ctls.u32ProcCtls == u32Val,
1646 ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.Ctls.u32ProcCtls, u32Val),
1647 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1648 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1649
1650 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1651 {
1652 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1653 AssertRCReturn(rc, rc);
1654 AssertMsgReturnStmt(pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 == u32Val,
1655 ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.Ctls.u32ProcCtls2, u32Val),
1656 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1657 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1658 }
1659
1660 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1661 AssertRCReturn(rc, rc);
1662 AssertMsgReturnStmt(pVCpu->hm.s.vmx.Ctls.u32XcptBitmap == u32Val,
1663 ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.Ctls.u32XcptBitmap, u32Val),
1664 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1665 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1666
1667 uint64_t u64Val;
1668 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1669 AssertRCReturn(rc, rc);
1670 AssertMsgReturnStmt(pVCpu->hm.s.vmx.Ctls.u64TscOffset == u64Val,
1671 ("Cache=%#RX64 VMCS=%#RX64\n", pVCpu->hm.s.vmx.Ctls.u64TscOffset, u64Val),
1672 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1673 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1674
1675 return VINF_SUCCESS;
1676}
1677
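/*
 * Illustrative sketch: hmR0VmxCheckVmcsCtls() lends itself to strict-build verification
 * after the control caches have been modified. The wrapper and its placement are
 * assumptions for illustration only.
 */
#if 0
static void hmR0VmxSketchVerifyCtlsCache(PVMCPU pVCpu)
{
# ifdef VBOX_STRICT
    int const rc2 = hmR0VmxCheckVmcsCtls(pVCpu);
    AssertMsg(rc2 == VINF_SUCCESS, ("VMCS control cache mismatch! u32HMError=%#x\n", pVCpu->hm.s.u32HMError));
# else
    RT_NOREF(pVCpu);
# endif
}
#endif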
1678
1679#ifdef VBOX_STRICT
1680/**
1681 * Verifies that our cached host EFER value has not changed
1682 * since we cached it.
1683 *
1684 * @param pVCpu The cross context virtual CPU structure.
1685 */
1686static void hmR0VmxCheckHostEferMsr(PVMCPU pVCpu)
1687{
1688 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1689
1690 if (pVCpu->hm.s.vmx.Ctls.u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
1691 {
1692 uint64_t u64Val;
1693 int rc = VMXReadVmcs64(VMX_VMCS64_HOST_EFER_FULL, &u64Val);
1694 AssertRC(rc);
1695
1696 uint64_t u64HostEferMsr = ASMRdMsr(MSR_K6_EFER);
1697 AssertMsgReturnVoid(u64HostEferMsr == u64Val, ("u64HostEferMsr=%#RX64 u64Val=%#RX64\n", u64HostEferMsr, u64Val));
1698 }
1699}
1700
1701
1702/**
1703 * Verifies whether the guest/host MSR pairs in the auto-load/store area in the
1704 * VMCS are correct.
1705 *
1706 * @param pVCpu The cross context virtual CPU structure.
1707 */
1708static void hmR0VmxCheckAutoLoadStoreMsrs(PVMCPU pVCpu)
1709{
1710 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1711
1712    /* Verify MSR counts in the VMCS are what we think they should be. */
1713 uint32_t cMsrs;
1714 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cMsrs); AssertRC(rc);
1715 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1716
1717 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cMsrs); AssertRC(rc);
1718 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1719
1720 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &cMsrs); AssertRC(rc);
1721 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1722
1723 PCVMXAUTOMSR pHostMsr = (PCVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1724 PCVMXAUTOMSR pGuestMsr = (PCVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1725 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
1726 {
1727 /* Verify that the MSRs are paired properly and that the host MSR has the correct value. */
1728 AssertMsgReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr, ("HostMsr=%#RX32 GuestMsr=%#RX32 cMsrs=%u\n", pHostMsr->u32Msr,
1729 pGuestMsr->u32Msr, cMsrs));
1730
1731 uint64_t u64Msr = ASMRdMsr(pHostMsr->u32Msr);
1732 AssertMsgReturnVoid(pHostMsr->u64Value == u64Msr, ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n",
1733 pHostMsr->u32Msr, pHostMsr->u64Value, u64Msr, cMsrs));
1734
1735 /* Verify that the permissions are as expected in the MSR bitmap. */
1736 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
1737 {
1738 VMXMSREXITREAD enmRead;
1739 VMXMSREXITWRITE enmWrite;
1740 rc = HMGetVmxMsrPermission(pVCpu->hm.s.vmx.pvMsrBitmap, pGuestMsr->u32Msr, &enmRead, &enmWrite);
1741            AssertMsgReturnVoid(rc == VINF_SUCCESS, ("HMGetVmxMsrPermission failed! rc=%Rrc\n", rc));
1742 if (pGuestMsr->u32Msr == MSR_K6_EFER)
1743 {
1744 AssertMsgReturnVoid(enmRead == VMXMSREXIT_INTERCEPT_READ, ("Passthru read for EFER!?\n"));
1745 AssertMsgReturnVoid(enmWrite == VMXMSREXIT_INTERCEPT_WRITE, ("Passthru write for EFER!?\n"));
1746 }
1747 else
1748 {
1749 AssertMsgReturnVoid(enmRead == VMXMSREXIT_PASSTHRU_READ, ("u32Msr=%#RX32 cMsrs=%u No passthru read!\n",
1750 pGuestMsr->u32Msr, cMsrs));
1751 AssertMsgReturnVoid(enmWrite == VMXMSREXIT_PASSTHRU_WRITE, ("u32Msr=%#RX32 cMsrs=%u No passthru write!\n",
1752 pGuestMsr->u32Msr, cMsrs));
1753 }
1754 }
1755 }
1756}
1757#endif /* VBOX_STRICT */
1758
1759
1760/**
1761 * Flushes the TLB using EPT.
1762 *
1763 * @returns VBox status code.
1764 * @param pVCpu The cross context virtual CPU structure of the calling
1765 * EMT. Can be NULL depending on @a enmTlbFlush.
1766 * @param enmTlbFlush Type of flush.
1767 *
1768 * @remarks The caller is responsible for ensuring that this function is only
1769 *          called when NestedPaging is supported and for providing an
1770 *          @a enmTlbFlush type that is supported by the CPU.
1771 * @remarks Can be called with interrupts disabled.
1772 */
1773static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXTLBFLUSHEPT enmTlbFlush)
1774{
1775 uint64_t au64Descriptor[2];
1776 if (enmTlbFlush == VMXTLBFLUSHEPT_ALL_CONTEXTS)
1777 au64Descriptor[0] = 0;
1778 else
1779 {
1780 Assert(pVCpu);
1781 au64Descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP;
1782 }
1783 au64Descriptor[1] = 0; /* MBZ. Intel spec. 33.3 "VMX Instructions" */
1784
1785 int rc = VMXR0InvEPT(enmTlbFlush, &au64Descriptor[0]);
1786 AssertMsg(rc == VINF_SUCCESS,
1787 ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmTlbFlush, pVCpu ? pVCpu->hm.s.vmx.HCPhysEPTP : 0, rc));
1788
1789 if ( RT_SUCCESS(rc)
1790 && pVCpu)
1791 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
1792}
1793
1794
1795/**
1796 * Flushes the TLB using VPID.
1797 *
1798 * @returns VBox status code.
1799 * @param pVCpu The cross context virtual CPU structure of the calling
1800 * EMT. Can be NULL depending on @a enmTlbFlush.
1801 * @param enmTlbFlush Type of flush.
1802 * @param GCPtr Virtual address of the page to flush (can be 0 depending
1803 * on @a enmTlbFlush).
1804 *
1805 * @remarks Can be called with interrupts disabled.
1806 */
1807static void hmR0VmxFlushVpid(PVMCPU pVCpu, VMXTLBFLUSHVPID enmTlbFlush, RTGCPTR GCPtr)
1808{
1809 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid);
1810
1811 uint64_t au64Descriptor[2];
1812 if (enmTlbFlush == VMXTLBFLUSHVPID_ALL_CONTEXTS)
1813 {
1814 au64Descriptor[0] = 0;
1815 au64Descriptor[1] = 0;
1816 }
1817 else
1818 {
1819 AssertPtr(pVCpu);
1820 AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1821 AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1822 au64Descriptor[0] = pVCpu->hm.s.uCurrentAsid;
1823 au64Descriptor[1] = GCPtr;
1824 }
1825
1826 int rc = VMXR0InvVPID(enmTlbFlush, &au64Descriptor[0]);
1827 AssertMsg(rc == VINF_SUCCESS,
1828 ("VMXR0InvVPID %#x %u %RGv failed with %Rrc\n", enmTlbFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
1829
1830 if ( RT_SUCCESS(rc)
1831 && pVCpu)
1832 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
1833 NOREF(rc);
1834}
1835
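/*
 * Illustrative sketch of the two hmR0VmxFlushVpid() flavours used in this file: flushing
 * a single guest-linear address versus flushing all VPID-tagged mappings. The wrapper is
 * hypothetical and assumes the CPU advertises individual-address INVVPID support
 * (MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR).
 */
#if 0
static void hmR0VmxSketchVpidFlush(PVMCPU pVCpu, RTGCPTR GCPtrPage)
{
    /* Targeted flush: only translations of GCPtrPage tagged with the current VPID. */
    hmR0VmxFlushVpid(pVCpu, VMXTLBFLUSHVPID_INDIV_ADDR, GCPtrPage);

    /* Heavy hammer: drop all VPID-tagged translations for all VPIDs. */
    hmR0VmxFlushVpid(pVCpu, VMXTLBFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
}
#endif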
1836
1837/**
1838 * Invalidates a guest page by guest virtual address. Only relevant for
1839 * EPT/VPID, otherwise there is nothing really to invalidate.
1840 *
1841 * @returns VBox status code.
1842 * @param pVCpu The cross context virtual CPU structure.
1843 * @param GCVirt Guest virtual address of the page to invalidate.
1844 */
1845VMMR0DECL(int) VMXR0InvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
1846{
1847 AssertPtr(pVCpu);
1848 LogFlowFunc(("pVCpu=%p GCVirt=%RGv\n", pVCpu, GCVirt));
1849
1850 bool fFlushPending = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1851 if (!fFlushPending)
1852 {
1853 /*
1854         * We must invalidate the guest TLB entry in either case; we cannot ignore it even for
1855 * the EPT case. See @bugref{6043} and @bugref{6177}.
1856 *
1857 * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*()
1858         * as this function may be called in a loop with individual addresses.
1859 */
1860 PVM pVM = pVCpu->CTX_SUFF(pVM);
1861 if (pVM->hm.s.vmx.fVpid)
1862 {
1863 bool fVpidFlush = RT_BOOL(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
1864
1865#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
1866 /*
1867             * Work around errata BV75, AAJ159 and others that affect several Intel CPUs
1868 * where executing INVVPID outside 64-bit mode does not flush translations of
1869 * 64-bit linear addresses, see @bugref{6208#c72}.
1870 */
1871 if (RT_HI_U32(GCVirt))
1872 fVpidFlush = false;
1873#endif
1874
1875 if (fVpidFlush)
1876 {
1877 hmR0VmxFlushVpid(pVCpu, VMXTLBFLUSHVPID_INDIV_ADDR, GCVirt);
1878 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
1879 }
1880 else
1881 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1882 }
1883 else if (pVM->hm.s.fNestedPaging)
1884 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1885 }
1886
1887 return VINF_SUCCESS;
1888}
1889
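/*
 * Illustrative sketch: VMXR0InvalidatePage() is the ring-0 entry point a hypothetical
 * caller would use after the guest invalidates a single linear address; it either does a
 * targeted VPID flush or falls back to setting VMCPU_FF_TLB_FLUSH for the next VM-entry.
 */
#if 0
static void hmR0VmxSketchInvalidateGuestPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
{
    int const rc2 = VMXR0InvalidatePage(pVCpu, GCPtrPage);
    AssertRC(rc2);
}
#endif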
1890
1891/**
1892 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
1893 * case where neither EPT nor VPID is supported by the CPU.
1894 *
1895 * @param pHostCpu The HM physical-CPU structure.
1896 * @param pVCpu The cross context virtual CPU structure.
1897 *
1898 * @remarks Called with interrupts disabled.
1899 */
1900static void hmR0VmxFlushTaggedTlbNone(PHMPHYSCPU pHostCpu, PVMCPU pVCpu)
1901{
1902 AssertPtr(pVCpu);
1903 AssertPtr(pHostCpu);
1904
1905 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
1906
1907 Assert(pHostCpu->idCpu != NIL_RTCPUID);
1908 pVCpu->hm.s.idLastCpu = pHostCpu->idCpu;
1909 pVCpu->hm.s.cTlbFlushes = pHostCpu->cTlbFlushes;
1910 pVCpu->hm.s.fForceTLBFlush = false;
1911 return;
1912}
1913
1914
1915/**
1916 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
1917 *
1918 * @param pHostCpu The HM physical-CPU structure.
1919 * @param pVCpu The cross context virtual CPU structure.
1920 *
1921 * @remarks All references to "ASID" in this function pertain to "VPID" in Intel's
1922 *          nomenclature. The term is kept to avoid confusion in the comparisons
1923 *          below, since the host-CPU copies are named "ASID".
1924 *
1925 * @remarks Called with interrupts disabled.
1926 */
1927static void hmR0VmxFlushTaggedTlbBoth(PHMPHYSCPU pHostCpu, PVMCPU pVCpu)
1928{
1929#ifdef VBOX_WITH_STATISTICS
1930 bool fTlbFlushed = false;
1931# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { fTlbFlushed = true; } while (0)
1932# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { \
1933 if (!fTlbFlushed) \
1934 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch); \
1935 } while (0)
1936#else
1937# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { } while (0)
1938# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { } while (0)
1939#endif
1940
1941 AssertPtr(pVCpu);
1942 AssertPtr(pHostCpu);
1943 Assert(pHostCpu->idCpu != NIL_RTCPUID);
1944
1945 PVM pVM = pVCpu->CTX_SUFF(pVM);
1946 AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
1947 ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
1948 "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hm.s.fNestedPaging, pVM->hm.s.vmx.fVpid));
1949
1950 /*
1951 * Force a TLB flush for the first world-switch if the current CPU differs from the one we
1952 * ran on last. If the TLB flush count changed, another VM (VCPU rather) has hit the ASID
1953 * limit while flushing the TLB or the host CPU is online after a suspend/resume, so we
1954 * cannot reuse the current ASID anymore.
1955 */
1956 if ( pVCpu->hm.s.idLastCpu != pHostCpu->idCpu
1957 || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes)
1958 {
1959 ++pHostCpu->uCurrentAsid;
1960 if (pHostCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1961 {
1962 pHostCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0. */
1963 pHostCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
1964 pHostCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
1965 }
1966
1967 pVCpu->hm.s.uCurrentAsid = pHostCpu->uCurrentAsid;
1968 pVCpu->hm.s.idLastCpu = pHostCpu->idCpu;
1969 pVCpu->hm.s.cTlbFlushes = pHostCpu->cTlbFlushes;
1970
1971 /*
1972 * Flush by EPT when we get rescheduled to a new host CPU to ensure EPT-only tagged mappings are also
1973 * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}.
1974 */
1975 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmTlbFlushEpt);
1976 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1977 HMVMX_SET_TAGGED_TLB_FLUSHED();
1978 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
1979 }
1980 else if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH)) /* Check for explicit TLB flushes. */
1981 {
1982 /*
1983         * Changes to the EPT paging structure by the VMM require flushing-by-EPT as the CPU
1984         * creates guest-physical (i.e. only EPT-tagged) mappings while traversing the EPT
1985 * tables when EPT is in use. Flushing-by-VPID will only flush linear (only
1986 * VPID-tagged) and combined (EPT+VPID tagged) mappings but not guest-physical
1987 * mappings, see @bugref{6568}.
1988 *
1989 * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information".
1990 */
1991 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmTlbFlushEpt);
1992 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1993 HMVMX_SET_TAGGED_TLB_FLUSHED();
1994 }
1995
1996 pVCpu->hm.s.fForceTLBFlush = false;
1997 HMVMX_UPDATE_FLUSH_SKIPPED_STAT();
1998
1999 Assert(pVCpu->hm.s.idLastCpu == pHostCpu->idCpu);
2000 Assert(pVCpu->hm.s.cTlbFlushes == pHostCpu->cTlbFlushes);
2001 AssertMsg(pVCpu->hm.s.cTlbFlushes == pHostCpu->cTlbFlushes,
2002 ("Flush count mismatch for cpu %d (%u vs %u)\n", pHostCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pHostCpu->cTlbFlushes));
2003 AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
2004 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pHostCpu->idCpu,
2005 pHostCpu->uCurrentAsid, pHostCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
2006 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2007 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pHostCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2008
2009 /* Update VMCS with the VPID. */
2010 int rc = VMXWriteVmcs32(VMX_VMCS16_VPID, pVCpu->hm.s.uCurrentAsid);
2011 AssertRC(rc);
2012
2013#undef HMVMX_SET_TAGGED_TLB_FLUSHED
2014}
2015
2016
2017/**
2018 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
2019 *
2020 * @param pHostCpu The HM physical-CPU structure.
2021 * @param pVCpu The cross context virtual CPU structure.
2022 *
2023 * @remarks Called with interrupts disabled.
2024 */
2025static void hmR0VmxFlushTaggedTlbEpt(PHMPHYSCPU pHostCpu, PVMCPU pVCpu)
2026{
2027 AssertPtr(pVCpu);
2028 AssertPtr(pHostCpu);
2029 Assert(pHostCpu->idCpu != NIL_RTCPUID);
2030 AssertMsg(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked without NestedPaging."));
2031 AssertMsg(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID."));
2032
2033 /*
2034 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
2035 * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
2036 */
2037 if ( pVCpu->hm.s.idLastCpu != pHostCpu->idCpu
2038 || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes)
2039 {
2040 pVCpu->hm.s.fForceTLBFlush = true;
2041 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2042 }
2043
2044 /* Check for explicit TLB flushes. */
2045 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2046 {
2047 pVCpu->hm.s.fForceTLBFlush = true;
2048 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2049 }
2050
2051 pVCpu->hm.s.idLastCpu = pHostCpu->idCpu;
2052 pVCpu->hm.s.cTlbFlushes = pHostCpu->cTlbFlushes;
2053
2054 if (pVCpu->hm.s.fForceTLBFlush)
2055 {
2056 hmR0VmxFlushEpt(pVCpu, pVCpu->CTX_SUFF(pVM)->hm.s.vmx.enmTlbFlushEpt);
2057 pVCpu->hm.s.fForceTLBFlush = false;
2058 }
2059}
2060
2061
2062/**
2063 * Flushes the tagged-TLB entries for VPID CPUs as necessary.
2064 *
2065 * @param pHostCpu The HM physical-CPU structure.
2066 * @param pVCpu The cross context virtual CPU structure.
2067 *
2068 * @remarks Called with interrupts disabled.
2069 */
2070static void hmR0VmxFlushTaggedTlbVpid(PHMPHYSCPU pHostCpu, PVMCPU pVCpu)
2071{
2072 AssertPtr(pVCpu);
2073 AssertPtr(pHostCpu);
2074 Assert(pHostCpu->idCpu != NIL_RTCPUID);
2075    AssertMsg(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbVpid cannot be invoked without VPID."));
2076    AssertMsg(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbVpid cannot be invoked with NestedPaging"));
2077
2078 /*
2079 * Force a TLB flush for the first world switch if the current CPU differs from the one we
2080 * ran on last. If the TLB flush count changed, another VM (VCPU rather) has hit the ASID
2081 * limit while flushing the TLB or the host CPU is online after a suspend/resume, so we
2082 * cannot reuse the current ASID anymore.
2083 */
2084 if ( pVCpu->hm.s.idLastCpu != pHostCpu->idCpu
2085 || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes)
2086 {
2087 pVCpu->hm.s.fForceTLBFlush = true;
2088 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2089 }
2090
2091 /* Check for explicit TLB flushes. */
2092 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2093 {
2094 /*
2095 * If we ever support VPID flush combinations other than ALL or SINGLE-context (see
2096 * hmR0VmxSetupTaggedTlb()) we would need to explicitly flush in this case (add an
2097 * fExplicitFlush = true here and change the pHostCpu->fFlushAsidBeforeUse check below to
2098         * include fExplicitFlush too) - an obscure corner case.
2099 */
2100 pVCpu->hm.s.fForceTLBFlush = true;
2101 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2102 }
2103
2104 PVM pVM = pVCpu->CTX_SUFF(pVM);
2105 pVCpu->hm.s.idLastCpu = pHostCpu->idCpu;
2106 if (pVCpu->hm.s.fForceTLBFlush)
2107 {
2108 ++pHostCpu->uCurrentAsid;
2109 if (pHostCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
2110 {
2111 pHostCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0 */
2112 pHostCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
2113 pHostCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
2114 }
2115
2116 pVCpu->hm.s.fForceTLBFlush = false;
2117 pVCpu->hm.s.cTlbFlushes = pHostCpu->cTlbFlushes;
2118 pVCpu->hm.s.uCurrentAsid = pHostCpu->uCurrentAsid;
2119 if (pHostCpu->fFlushAsidBeforeUse)
2120 {
2121 if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
2122 hmR0VmxFlushVpid(pVCpu, VMXTLBFLUSHVPID_SINGLE_CONTEXT, 0 /* GCPtr */);
2123 else if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_ALL_CONTEXTS)
2124 {
2125 hmR0VmxFlushVpid(pVCpu, VMXTLBFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
2126 pHostCpu->fFlushAsidBeforeUse = false;
2127 }
2128 else
2129 {
2130 /* hmR0VmxSetupTaggedTlb() ensures we never get here. Paranoia. */
2131 AssertMsgFailed(("Unsupported VPID-flush context type.\n"));
2132 }
2133 }
2134 }
2135
2136 AssertMsg(pVCpu->hm.s.cTlbFlushes == pHostCpu->cTlbFlushes,
2137 ("Flush count mismatch for cpu %d (%u vs %u)\n", pHostCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pHostCpu->cTlbFlushes));
2138 AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
2139 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pHostCpu->idCpu,
2140 pHostCpu->uCurrentAsid, pHostCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
2141 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2142 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pHostCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2143
2144 int rc = VMXWriteVmcs32(VMX_VMCS16_VPID, pVCpu->hm.s.uCurrentAsid);
2145 AssertRC(rc);
2146}
2147
2148
2149/**
2150 * Flushes the guest TLB entry based on CPU capabilities.
2151 *
2152 * @param pHostCpu The HM physical-CPU structure.
2153 * @param pVCpu The cross context virtual CPU structure.
2154 *
2155 * @remarks Called with interrupts disabled.
2156 */
2157DECLINLINE(void) hmR0VmxFlushTaggedTlb(PHMPHYSCPU pHostCpu, PVMCPU pVCpu)
2158{
2159#ifdef HMVMX_ALWAYS_FLUSH_TLB
2160 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
2161#endif
2162 PVM pVM = pVCpu->CTX_SUFF(pVM);
2163 switch (pVM->hm.s.vmx.enmTlbFlushType)
2164 {
2165 case VMXTLBFLUSHTYPE_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pHostCpu, pVCpu); break;
2166 case VMXTLBFLUSHTYPE_EPT: hmR0VmxFlushTaggedTlbEpt(pHostCpu, pVCpu); break;
2167 case VMXTLBFLUSHTYPE_VPID: hmR0VmxFlushTaggedTlbVpid(pHostCpu, pVCpu); break;
2168 case VMXTLBFLUSHTYPE_NONE: hmR0VmxFlushTaggedTlbNone(pHostCpu, pVCpu); break;
2169 default:
2170 AssertMsgFailed(("Invalid flush-tag function identifier\n"));
2171 break;
2172 }
2173 /* Don't assert that VMCPU_FF_TLB_FLUSH should no longer be pending. It can be set by other EMTs. */
2174}
2175
2176
2177/**
2178 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
2179 * TLB entries from the host TLB before VM-entry.
2180 *
2181 * @returns VBox status code.
2182 * @param pVM The cross context VM structure.
2183 */
2184static int hmR0VmxSetupTaggedTlb(PVM pVM)
2185{
2186 /*
2187 * Determine optimal flush type for Nested Paging.
2188     * We cannot ignore EPT if no suitable flush type is supported by the CPU, as we've already set up unrestricted
2189 * guest execution (see hmR3InitFinalizeR0()).
2190 */
2191 if (pVM->hm.s.fNestedPaging)
2192 {
2193 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
2194 {
2195 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
2196 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_SINGLE_CONTEXT;
2197 else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
2198 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_ALL_CONTEXTS;
2199 else
2200 {
2201                /* Shouldn't happen. EPT is supported but no suitable flush types are supported. */
2202 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
2203 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_FLUSH_TYPE_UNSUPPORTED;
2204 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2205 }
2206
2207 /* Make sure the write-back cacheable memory type for EPT is supported. */
2208 if (RT_UNLIKELY(!(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB)))
2209 {
2210 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
2211 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_MEM_TYPE_NOT_WB;
2212 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2213 }
2214
2215 /* EPT requires a page-walk length of 4. */
2216 if (RT_UNLIKELY(!(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4)))
2217 {
2218 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
2219 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_PAGE_WALK_LENGTH_UNSUPPORTED;
2220 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2221 }
2222 }
2223 else
2224 {
2225 /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
2226 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
2227 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_INVEPT_UNAVAILABLE;
2228 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2229 }
2230 }
2231
2232 /*
2233 * Determine optimal flush type for VPID.
2234 */
2235 if (pVM->hm.s.vmx.fVpid)
2236 {
2237 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
2238 {
2239 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
2240 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_SINGLE_CONTEXT;
2241 else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
2242 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_ALL_CONTEXTS;
2243 else
2244 {
2245                /* Neither SINGLE nor ALL-context flush types for VPID are supported by the CPU. Ignore VPID capability. */
2246 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
2247 LogRelFunc(("Only INDIV_ADDR supported. Ignoring VPID.\n"));
2248 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
2249 LogRelFunc(("Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
2250 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NOT_SUPPORTED;
2251 pVM->hm.s.vmx.fVpid = false;
2252 }
2253 }
2254 else
2255 {
2256 /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
2257            Log4Func(("VPID supported without INVVPID support. Ignoring VPID.\n"));
2258 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NOT_SUPPORTED;
2259 pVM->hm.s.vmx.fVpid = false;
2260 }
2261 }
2262
2263 /*
2264     * Set up the handler for flushing tagged-TLBs.
2265 */
2266 if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
2267 pVM->hm.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_EPT_VPID;
2268 else if (pVM->hm.s.fNestedPaging)
2269 pVM->hm.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_EPT;
2270 else if (pVM->hm.s.vmx.fVpid)
2271 pVM->hm.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_VPID;
2272 else
2273 pVM->hm.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_NONE;
2274 return VINF_SUCCESS;
2275}
2276
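/*
 * Summary of the tagged-TLB flush handler selection above (assuming neither capability
 * was demoted to "not supported" by the checks):
 *
 *      fNestedPaging   fVpid   enmTlbFlushType
 *      true            true    VMXTLBFLUSHTYPE_EPT_VPID   (hmR0VmxFlushTaggedTlbBoth)
 *      true            false   VMXTLBFLUSHTYPE_EPT        (hmR0VmxFlushTaggedTlbEpt)
 *      false           true    VMXTLBFLUSHTYPE_VPID       (hmR0VmxFlushTaggedTlbVpid)
 *      false           false   VMXTLBFLUSHTYPE_NONE       (hmR0VmxFlushTaggedTlbNone)
 */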
2277
2278/**
2279 * Sets up pin-based VM-execution controls in the VMCS.
2280 *
2281 * @returns VBox status code.
2282 * @param pVCpu The cross context virtual CPU structure.
2283 *
2284 * @remarks We don't bother optimizing vmwrites here as this is done only
2285 *          once per VM; hence VMCS-field cache comparisons don't matter.
2286 */
2287static int hmR0VmxSetupPinCtls(PVMCPU pVCpu)
2288{
2289 PVM pVM = pVCpu->CTX_SUFF(pVM);
2290 uint32_t fVal = pVM->hm.s.vmx.Msrs.PinCtls.n.allowed0; /* Bits set here must always be set. */
2291 uint32_t const fZap = pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1; /* Bits cleared here must always be cleared. */
2292
2293 fVal |= VMX_PIN_CTLS_EXT_INT_EXIT /* External interrupts cause a VM-exit. */
2294 | VMX_PIN_CTLS_NMI_EXIT; /* Non-maskable interrupts (NMIs) cause a VM-exit. */
2295
2296 if (pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1 & VMX_PIN_CTLS_VIRT_NMI)
2297 fVal |= VMX_PIN_CTLS_VIRT_NMI; /* Use virtual NMIs and virtual-NMI blocking features. */
2298
2299 /* Enable the VMX preemption timer. */
2300 if (pVM->hm.s.vmx.fUsePreemptTimer)
2301 {
2302 Assert(pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1 & VMX_PIN_CTLS_PREEMPT_TIMER);
2303 fVal |= VMX_PIN_CTLS_PREEMPT_TIMER;
2304 }
2305
2306#if 0
2307 /* Enable posted-interrupt processing. */
2308 if (pVM->hm.s.fPostedIntrs)
2309 {
2310 Assert(pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1 & VMX_PIN_CTLS_POSTED_INT);
2311 Assert(pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_ACK_EXT_INT);
2312        fVal |= VMX_PIN_CTLS_POSTED_INT;
2313 }
2314#endif
2315
2316 if ((fVal & fZap) != fVal)
2317 {
2318 LogRelFunc(("Invalid pin-based VM-execution controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
2319 pVM->hm.s.vmx.Msrs.PinCtls.n.allowed0, fVal, fZap));
2320 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
2321 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2322 }
2323
2324 /* Commit it to the VMCS and update our cache. */
2325 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, fVal);
2326 AssertRCReturn(rc, rc);
2327 pVCpu->hm.s.vmx.Ctls.u32PinCtls = fVal;
2328
2329 return VINF_SUCCESS;
2330}
2331
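/*
 * Illustrative sketch of the allowed0/allowed1 pattern used by the control setup
 * functions in this file: bits set in allowed0 are mandatory-1, bits clear in allowed1
 * are mandatory-0, and "(fVal & fZap) != fVal" catches any feature we requested that the
 * CPU cannot actually set. The numeric values below are made up for the example.
 */
#if 0
static bool hmR0VmxSketchCheckCtlsCombo(void)
{
    uint32_t const fAllowed0 = UINT32_C(0x00000016);     /* Hypothetical mandatory-1 bits. */
    uint32_t const fAllowed1 = UINT32_C(0x0000ffff);     /* Hypothetical allowed-1 bits. */
    uint32_t       fVal      = fAllowed0 | RT_BIT_32(7); /* Mandatory bits plus one optional feature. */
    uint32_t const fZap      = fAllowed1;

    /* Bit 7 is allowed here, so (fVal & fZap) == fVal and the combination is accepted.
       Had bit 7 been clear in fAllowed1, the masking would drop it and the check would fail. */
    return (fVal & fZap) == fVal;
}
#endif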
2332
2333/**
2334 * Sets up secondary processor-based VM-execution controls in the VMCS.
2335 *
2336 * @returns VBox status code.
2337 * @param pVCpu The cross context virtual CPU structure.
2338 *
2339 * @remarks We don't bother optimizing vmwrites here as this is done only
2340 *          once per VM; hence VMCS-field cache comparisons don't matter.
2341 */
2342static int hmR0VmxSetupProcCtls2(PVMCPU pVCpu)
2343{
2344 PVM pVM = pVCpu->CTX_SUFF(pVM);
2345 uint32_t fVal = pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed0; /* Bits set here must be set in the VMCS. */
2346 uint32_t const fZap = pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2347
2348 /* WBINVD causes a VM-exit. */
2349 if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_WBINVD_EXIT)
2350 fVal |= VMX_PROC_CTLS2_WBINVD_EXIT;
2351
2352 /* Enable EPT (aka nested-paging). */
2353 if (pVM->hm.s.fNestedPaging)
2354 fVal |= VMX_PROC_CTLS2_EPT;
2355
2356 /*
2357 * Enable the INVPCID instruction if supported by the hardware and we expose
2358 * it to the guest. Without this, guest executing INVPCID would cause a #UD.
2359 */
2360 if ( (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_INVPCID)
2361 && pVM->cpum.ro.GuestFeatures.fInvpcid)
2362 fVal |= VMX_PROC_CTLS2_INVPCID;
2363
2364 /* Enable VPID. */
2365 if (pVM->hm.s.vmx.fVpid)
2366 fVal |= VMX_PROC_CTLS2_VPID;
2367
2368 /* Enable Unrestricted guest execution. */
2369 if (pVM->hm.s.vmx.fUnrestrictedGuest)
2370 fVal |= VMX_PROC_CTLS2_UNRESTRICTED_GUEST;
2371
2372#if 0
2373 if (pVM->hm.s.fVirtApicRegs)
2374 {
2375 /* Enable APIC-register virtualization. */
2376 Assert(pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_APIC_REG_VIRT);
2377 fVal |= VMX_PROC_CTLS2_APIC_REG_VIRT;
2378
2379 /* Enable virtual-interrupt delivery. */
2380 Assert(pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_INTR_DELIVERY);
2381 fVal |= VMX_PROC_CTLS2_VIRT_INTR_DELIVERY;
2382 }
2383#endif
2384
2385 /* Virtualize-APIC accesses if supported by the CPU. The virtual-APIC page is where the TPR shadow resides. */
2386    /** @todo VIRT_X2APIC support: it's mutually exclusive with this, so it must be
2387     *        done dynamically. */
2388 if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
2389 {
2390 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
2391 Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */
2392 fVal |= VMX_PROC_CTLS2_VIRT_APIC_ACCESS; /* Virtualize APIC accesses. */
2393 int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
2394 AssertRCReturn(rc, rc);
2395 }
2396
2397 /* Enable RDTSCP. */
2398 if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_RDTSCP)
2399 fVal |= VMX_PROC_CTLS2_RDTSCP;
2400
2401 /* Enable Pause-Loop exiting. */
2402 if ( pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT
2403 && pVM->hm.s.vmx.cPleGapTicks
2404 && pVM->hm.s.vmx.cPleWindowTicks)
2405 {
2406 fVal |= VMX_PROC_CTLS2_PAUSE_LOOP_EXIT;
2407
2408 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks);
2409 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, pVM->hm.s.vmx.cPleWindowTicks);
2410 AssertRCReturn(rc, rc);
2411 }
2412
2413 if ((fVal & fZap) != fVal)
2414 {
2415 LogRelFunc(("Invalid secondary processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
2416 pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed0, fVal, fZap));
2417 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
2418 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2419 }
2420
2421 /* Commit it to the VMCS and update our cache. */
2422 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, fVal);
2423 AssertRCReturn(rc, rc);
2424 pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 = fVal;
2425
2426 return VINF_SUCCESS;
2427}
2428
2429
2430/**
2431 * Sets up processor-based VM-execution controls in the VMCS.
2432 *
2433 * @returns VBox status code.
2434 * @param pVCpu The cross context virtual CPU structure.
2435 *
2436 * @remarks We don't bother optimizing vmwrites here as this is done only
2437 *          once per VM; hence VMCS-field cache comparisons don't matter.
2438 */
2439static int hmR0VmxSetupProcCtls(PVMCPU pVCpu)
2440{
2441 PVM pVM = pVCpu->CTX_SUFF(pVM);
2442 uint32_t fVal = pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
2443 uint32_t const fZap = pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2444
2445 fVal |= VMX_PROC_CTLS_HLT_EXIT /* HLT causes a VM-exit. */
2446 | VMX_PROC_CTLS_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
2447 | VMX_PROC_CTLS_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
2448 | VMX_PROC_CTLS_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
2449 | VMX_PROC_CTLS_RDPMC_EXIT /* RDPMC causes a VM-exit. */
2450 | VMX_PROC_CTLS_MONITOR_EXIT /* MONITOR causes a VM-exit. */
2451 | VMX_PROC_CTLS_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
2452
2453    /* We toggle VMX_PROC_CTLS_MOV_DR_EXIT later; check that it isn't -always- required to be set or cleared. */
2454 if ( !(pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_MOV_DR_EXIT)
2455 || (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed0 & VMX_PROC_CTLS_MOV_DR_EXIT))
2456 {
2457        LogRelFunc(("Unsupported VMX_PROC_CTLS_MOV_DR_EXIT combo!\n"));
2458 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
2459 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2460 }
2461
2462 /* Without Nested Paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
2463 if (!pVM->hm.s.fNestedPaging)
2464 {
2465 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); /* Paranoia. */
2466 fVal |= VMX_PROC_CTLS_INVLPG_EXIT
2467 | VMX_PROC_CTLS_CR3_LOAD_EXIT
2468 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2469 }
2470
2471 /* Use TPR shadowing if supported by the CPU. */
2472 if ( PDMHasApic(pVM)
2473 && pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW)
2474 {
2475 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
2476 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */
2477 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
2478 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
2479 AssertRCReturn(rc, rc);
2480
2481 fVal |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
2482 /* CR8 writes cause a VM-exit based on TPR threshold. */
2483 Assert(!(fVal & VMX_PROC_CTLS_CR8_STORE_EXIT));
2484 Assert(!(fVal & VMX_PROC_CTLS_CR8_LOAD_EXIT));
2485 }
2486 else
2487 {
2488 /*
2489 * Some 32-bit CPUs do not support CR8 load/store exiting as MOV CR8 is invalid on 32-bit Intel CPUs.
2490 * Set this control only for 64-bit guests.
2491 */
2492 if (pVM->hm.s.fAllow64BitGuests)
2493 {
2494 fVal |= VMX_PROC_CTLS_CR8_STORE_EXIT /* CR8 reads cause a VM-exit. */
2495 | VMX_PROC_CTLS_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */
2496 }
2497 }
2498
2499 /* Use MSR-bitmaps if supported by the CPU. */
2500 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
2501 {
2502 fVal |= VMX_PROC_CTLS_USE_MSR_BITMAPS;
2503
2504 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
2505 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */
2506 int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
2507 AssertRCReturn(rc, rc);
2508
2509 /*
2510 * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored
2511 * automatically using dedicated fields in the VMCS.
2512 */
2513 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2514 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2515 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2516 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2517 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2518#if HC_ARCH_BITS == 64
2519 /*
2520 * Set passthru permissions for the following MSRs (mandatory for VT-x) required for 64-bit guests.
2521 */
2522 if (pVM->hm.s.fAllow64BitGuests)
2523 {
2524 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2525 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2526 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2527 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2528 }
2529#endif
2530 /*
2531         * The IA32_PRED_CMD and IA32_FLUSH_CMD MSRs are write-only and have no state
2532         * associated with them. We never need to intercept access (writes need to
2533 * be executed without exiting, reads will #GP-fault anyway).
2534 */
2535 if (pVM->cpum.ro.GuestFeatures.fIbpb)
2536 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_PRED_CMD, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2537 if (pVM->cpum.ro.GuestFeatures.fFlushCmd)
2538 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_FLUSH_CMD, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2539
2540        /* Though MSR_IA32_PERF_GLOBAL_CTRL is saved/restored lazily, we want to intercept reads/writes to it for now. */
2541 }
2542
2543 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
2544 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
2545 fVal |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
2546
2547 if ((fVal & fZap) != fVal)
2548 {
2549 LogRelFunc(("Invalid processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
2550 pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed0, fVal, fZap));
2551 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
2552 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2553 }
2554
2555 /* Commit it to the VMCS and update our cache. */
2556 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, fVal);
2557 AssertRCReturn(rc, rc);
2558 pVCpu->hm.s.vmx.Ctls.u32ProcCtls = fVal;
2559
2560 /* Set up secondary processor-based VM-execution controls if the CPU supports it. */
2561 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
2562 return hmR0VmxSetupProcCtls2(pVCpu);
2563
2564 /* Sanity check, should not really happen. */
2565 if (RT_UNLIKELY(pVM->hm.s.vmx.fUnrestrictedGuest))
2566 {
2567 LogRelFunc(("Unrestricted Guest enabled when secondary processor-based VM-execution controls not available\n"));
2568 pVCpu->hm.s.u32HMError = VMX_UFC_INVALID_UX_COMBO;
2569 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2570 }
2571
2572 /* Old CPUs without secondary processor-based VM-execution controls would end up here. */
2573 return VINF_SUCCESS;
2574}
2575
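/*
 * Illustrative sketch: granting passthru access to an additional MSR follows the same
 * hmR0VmxSetMsrPermission() pattern used in hmR0VmxSetupProcCtls() above. MSR_K8_TSC_AUX
 * is used purely as an example; whether it should really be passed through is decided
 * elsewhere and depends on what is exposed to the guest.
 */
#if 0
static void hmR0VmxSketchPassthruExtraMsr(PVMCPU pVCpu)
{
    if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
        hmR0VmxSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
}
#endif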
2576
2577/**
2578 * Sets up miscellaneous (everything other than Pin & Processor-based
2579 * VM-execution) control fields in the VMCS.
2580 *
2581 * @returns VBox status code.
2582 * @param pVCpu The cross context virtual CPU structure.
2583 */
2584static int hmR0VmxSetupMiscCtls(PVMCPU pVCpu)
2585{
2586 AssertPtr(pVCpu);
2587
2588 int rc = VERR_GENERAL_FAILURE;
2589
2590 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
2591#if 0
2592 /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxExportGuestCR3AndCR4())*/
2593 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0);
2594 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0);
2595
2596 /*
2597 * Set MASK & MATCH to 0. VMX checks if GuestPFErrCode & MASK == MATCH. If equal (in our case it always is)
2598     * and if the X86_XCPT_PF bit in the exception bitmap is set, it causes a VM-exit; if clear, it doesn't.
2599     * We thus use the exception bitmap to control it rather than using both.
2600 */
2601 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0);
2602 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0);
2603
2604 /* All IO & IOIO instructions cause VM-exits. */
2605 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0);
2606 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0);
2607
2608 /* Initialize the MSR-bitmap area. */
2609 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
2610 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);
2611 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0);
2612 AssertRCReturn(rc, rc);
2613#endif
2614
2615    /* Set up the MSR auto-load/store area. The VM-entry MSR-load and VM-exit MSR-store areas both point at the same guest MSR page, so guest values stored on VM-exit are reloaded on the next VM-entry. */
2616 Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
2617 Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf)); /* Lower 4 bits MBZ. */
2618 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
2619 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
2620 AssertRCReturn(rc, rc);
2621
2622 Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
2623 Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf)); /* Lower 4 bits MBZ. */
2624 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
2625 AssertRCReturn(rc, rc);
2626
2627 /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
2628 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, UINT64_C(0xffffffffffffffff));
2629 AssertRCReturn(rc, rc);
2630
2631 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
2632#if 0
2633 /* Setup debug controls */
2634 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0);
2635 rc |= VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, 0);
2636 AssertRCReturn(rc, rc);
2637#endif
2638
2639 return rc;
2640}
2641
2642
2643/**
2644 * Sets up the initial exception bitmap in the VMCS based on static conditions.
2645 *
2646 * We set up those exception intercepts that don't change during the
2647 * lifetime of the VM here. The rest are done dynamically while loading the
2648 * guest state.
2649 *
2650 * @returns VBox status code.
2651 * @param pVCpu The cross context virtual CPU structure.
2652 */
2653static int hmR0VmxInitXcptBitmap(PVMCPU pVCpu)
2654{
2655 AssertPtr(pVCpu);
2656
2657 uint32_t uXcptBitmap;
2658
2659 /* Must always intercept #AC to prevent the guest from hanging the CPU. */
2660 uXcptBitmap = RT_BIT_32(X86_XCPT_AC);
2661
2662 /* Because we need to maintain the DR6 state even when intercepting DRx reads
2663       and writes, and because recursive #DBs can cause the CPU to hang, we must always
2664 intercept #DB. */
2665 uXcptBitmap |= RT_BIT_32(X86_XCPT_DB);
2666
2667 /* Without Nested Paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
2668 if (!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
2669 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2670
2671 /* Commit it to the VMCS. */
2672 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2673 AssertRCReturn(rc, rc);
2674
2675 /* Update our cache of the exception bitmap. */
2676 pVCpu->hm.s.vmx.Ctls.u32XcptBitmap = uXcptBitmap;
2677 return VINF_SUCCESS;
2678}
2679
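/*
 * Illustrative sketch of how the exception bitmap can be adjusted later at runtime:
 * flip the bit in the cached copy, write it to the VMCS and keep the cache in sync.
 * Trapping #GP is only an example; the helper name is hypothetical.
 */
#if 0
static int hmR0VmxSketchTrapXcptGP(PVMCPU pVCpu)
{
    uint32_t const uXcptBitmap = pVCpu->hm.s.vmx.Ctls.u32XcptBitmap | RT_BIT_32(X86_XCPT_GP);
    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
    AssertRCReturn(rc, rc);
    pVCpu->hm.s.vmx.Ctls.u32XcptBitmap = uXcptBitmap;
    return VINF_SUCCESS;
}
#endif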
2680
2681/**
2682 * Does per-VM VT-x initialization.
2683 *
2684 * @returns VBox status code.
2685 * @param pVM The cross context VM structure.
2686 */
2687VMMR0DECL(int) VMXR0InitVM(PVM pVM)
2688{
2689 LogFlowFunc(("pVM=%p\n", pVM));
2690
2691 int rc = hmR0VmxStructsAlloc(pVM);
2692 if (RT_FAILURE(rc))
2693 {
2694 LogRelFunc(("hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
2695 return rc;
2696 }
2697
2698 return VINF_SUCCESS;
2699}
2700
2701
2702/**
2703 * Does per-VM VT-x termination.
2704 *
2705 * @returns VBox status code.
2706 * @param pVM The cross context VM structure.
2707 */
2708VMMR0DECL(int) VMXR0TermVM(PVM pVM)
2709{
2710 LogFlowFunc(("pVM=%p\n", pVM));
2711
2712#ifdef VBOX_WITH_CRASHDUMP_MAGIC
2713 if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ)
2714 ASMMemZero32(pVM->hm.s.vmx.pvScratch, PAGE_SIZE);
2715#endif
2716 hmR0VmxStructsFree(pVM);
2717 return VINF_SUCCESS;
2718}
2719
2720
2721/**
2722 * Sets up the VM for execution under VT-x.
2723 * This function is only called once per-VM during initialization.
2724 *
2725 * @returns VBox status code.
2726 * @param pVM The cross context VM structure.
2727 */
2728VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
2729{
2730 AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
2731 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2732
2733 LogFlowFunc(("pVM=%p\n", pVM));
2734
2735 /*
2736 * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be
2737 * allocated. We no longer support the highly unlikely case of UnrestrictedGuest without
2738 * pRealModeTSS, see hmR3InitFinalizeR0Intel().
2739 */
2740 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
2741 && ( !pVM->hm.s.vmx.pNonPagingModeEPTPageTable
2742 || !pVM->hm.s.vmx.pRealModeTSS))
2743 {
2744 LogRelFunc(("Invalid real-on-v86 state.\n"));
2745 return VERR_INTERNAL_ERROR;
2746 }
2747
2748    /* Always initialize these, see hmR3InitFinalizeR0(). */
2749 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NONE;
2750 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NONE;
2751
2752 /* Setup the tagged-TLB flush handlers. */
2753 int rc = hmR0VmxSetupTaggedTlb(pVM);
2754 if (RT_FAILURE(rc))
2755 {
2756 LogRelFunc(("hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
2757 return rc;
2758 }
2759
2760 /* Check if we can use the VMCS controls for swapping the EFER MSR. */
2761 Assert(!pVM->hm.s.vmx.fSupportsVmcsEfer);
2762#if HC_ARCH_BITS == 64
2763 if ( (pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed1 & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
2764 && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_LOAD_EFER_MSR)
2765 && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_EFER_MSR))
2766 {
2767 pVM->hm.s.vmx.fSupportsVmcsEfer = true;
2768 }
2769#endif
2770
2771 /* At least verify VMX is enabled, since we can't check if we're in VMX root mode without #GP'ing. */
2772 RTCCUINTREG const uHostCR4 = ASMGetCR4();
2773 if (RT_UNLIKELY(!(uHostCR4 & X86_CR4_VMXE)))
2774 return VERR_VMX_NOT_IN_VMX_ROOT_MODE;
2775
2776 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2777 {
2778 PVMCPU pVCpu = &pVM->aCpus[i];
2779 AssertPtr(pVCpu);
2780 AssertPtr(pVCpu->hm.s.vmx.pvVmcs);
2781
2782 /* Log the VCPU pointers, useful for debugging SMP VMs. */
2783 Log4Func(("pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
2784
2785 /* Set revision dword at the beginning of the VMCS structure. */
2786 *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_ID);
2787
2788 /* Set the VMCS launch state to "clear", see Intel spec. 31.6 "Preparation and launch a virtual machine". */
2789 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2790 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs failed! rc=%Rrc\n", rc),
2791 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
2792
2793 /* Load this VMCS as the current VMCS. */
2794 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2795 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVmcs failed! rc=%Rrc\n", rc),
2796 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
2797
2798 rc = hmR0VmxSetupPinCtls(pVCpu);
2799 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc\n", rc),
2800 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
2801
2802 rc = hmR0VmxSetupProcCtls(pVCpu);
2803 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc\n", rc),
2804 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
2805
2806 rc = hmR0VmxSetupMiscCtls(pVCpu);
2807 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc\n", rc),
2808 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
2809
2810 rc = hmR0VmxInitXcptBitmap(pVCpu);
2811 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc\n", rc),
2812 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
2813
2814#if HC_ARCH_BITS == 32
2815 rc = hmR0VmxInitVmcsReadCache(pVCpu);
2816 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc\n", rc),
2817 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
2818#endif
2819
2820 /* Sync any CPU internal VMCS data back into our VMCS in memory. */
2821 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2822 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs(2) failed! rc=%Rrc\n", rc),
2823 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
2824
2825 pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_CLEAR;
2826
2827 hmR0VmxUpdateErrorRecord(pVCpu, rc);
2828 }
2829
2830 return VINF_SUCCESS;
2831}
2832
2833
2834/**
2835 * Saves the host control registers (CR0, CR3, CR4) into the host-state area in
2836 * the VMCS.
2837 *
2838 * @returns VBox status code.
2839 */
2840static int hmR0VmxExportHostControlRegs(void)
2841{
2842 RTCCUINTREG uReg = ASMGetCR0();
2843 int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
2844 AssertRCReturn(rc, rc);
2845
2846 uReg = ASMGetCR3();
2847 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
2848 AssertRCReturn(rc, rc);
2849
2850 uReg = ASMGetCR4();
2851 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
2852 AssertRCReturn(rc, rc);
2853 return rc;
2854}
2855
2856
2857/**
2858 * Saves the host segment registers and GDTR, IDTR, (TR, GS and FS bases) into
2859 * the host-state area in the VMCS.
2860 *
2861 * @returns VBox status code.
2862 * @param pVCpu The cross context virtual CPU structure.
2863 */
2864static int hmR0VmxExportHostSegmentRegs(PVMCPU pVCpu)
2865{
2866#if HC_ARCH_BITS == 64
2867/**
2868 * Macro for adjusting host segment selectors to satisfy VT-x's VM-entry
2869 * requirements. See hmR0VmxExportHostSegmentRegs().
2870 */
2871# define VMXLOCAL_ADJUST_HOST_SEG(seg, selValue) \
2872 if ((selValue) & (X86_SEL_RPL | X86_SEL_LDT)) \
2873 { \
2874 bool fValidSelector = true; \
2875 if ((selValue) & X86_SEL_LDT) \
2876 { \
2877 uint32_t uAttr = ASMGetSegAttr((selValue)); \
2878 fValidSelector = RT_BOOL(uAttr != UINT32_MAX && (uAttr & X86_DESC_P)); \
2879 } \
2880 if (fValidSelector) \
2881 { \
2882 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_##seg; \
2883 pVCpu->hm.s.vmx.RestoreHost.uHostSel##seg = (selValue); \
2884 } \
2885 (selValue) = 0; \
2886 }
2887
2888 /*
2889 * If we've executed guest code using VT-x, the host-state bits will be messed up. We
2890 * should -not- save the messed up state without restoring the original host-state,
2891 * see @bugref{7240}.
2892 *
2893 * This apparently can happen (most likely the FPU changes), deal with it rather than
2894 * asserting. Was observed booting Solaris 10u10 32-bit guest.
2895 */
2896 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
2897 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
2898 {
2899 Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags,
2900 pVCpu->idCpu));
2901 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
2902 }
2903 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
2904#else
2905 RT_NOREF(pVCpu);
2906#endif
2907
2908 /*
2909 * Host DS, ES, FS and GS segment registers.
2910 */
2911#if HC_ARCH_BITS == 64
2912 RTSEL uSelDS = ASMGetDS();
2913 RTSEL uSelES = ASMGetES();
2914 RTSEL uSelFS = ASMGetFS();
2915 RTSEL uSelGS = ASMGetGS();
2916#else
2917 RTSEL uSelDS = 0;
2918 RTSEL uSelES = 0;
2919 RTSEL uSelFS = 0;
2920 RTSEL uSelGS = 0;
2921#endif
2922
2923 /*
2924 * Host CS and SS segment registers.
2925 */
2926 RTSEL uSelCS = ASMGetCS();
2927 RTSEL uSelSS = ASMGetSS();
2928
2929 /*
2930 * Host TR segment register.
2931 */
2932 RTSEL uSelTR = ASMGetTR();
2933
2934#if HC_ARCH_BITS == 64
2935 /*
2936     * Determine if the host segment registers are suitable for VT-x. If not, use zero so
2937     * that VM-entry succeeds, and restore the original selectors before we get preempted.
2938 *
2939 * See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
2940 */
2941 VMXLOCAL_ADJUST_HOST_SEG(DS, uSelDS);
2942 VMXLOCAL_ADJUST_HOST_SEG(ES, uSelES);
2943 VMXLOCAL_ADJUST_HOST_SEG(FS, uSelFS);
2944 VMXLOCAL_ADJUST_HOST_SEG(GS, uSelGS);
2945# undef VMXLOCAL_ADJUST_HOST_SEG
2946#endif
2947
2948 /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
2949 Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
2950 Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));
2951 Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
2952 Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
2953 Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT));
2954 Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT));
2955 Assert(!(uSelTR & X86_SEL_RPL)); Assert(!(uSelTR & X86_SEL_LDT));
2956 Assert(uSelCS);
2957 Assert(uSelTR);
2958
2959 /* The assertion below is correct, but u32ExitCtls would not have been updated yet at this point. */
2960#if 0
2961 if (!(pVCpu->hm.s.vmx.Ctls.u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE))
2962 Assert(uSelSS != 0);
2963#endif
2964
2965 /* Write these host selector fields into the host-state area in the VMCS. */
2966 int rc = VMXWriteVmcs32(VMX_VMCS16_HOST_CS_SEL, uSelCS);
2967 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_SS_SEL, uSelSS);
2968#if HC_ARCH_BITS == 64
2969 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_DS_SEL, uSelDS);
2970 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_ES_SEL, uSelES);
2971 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FS_SEL, uSelFS);
2972 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_GS_SEL, uSelGS);
2973#else
2974 NOREF(uSelDS);
2975 NOREF(uSelES);
2976 NOREF(uSelFS);
2977 NOREF(uSelGS);
2978#endif
2979 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_TR_SEL, uSelTR);
2980 AssertRCReturn(rc, rc);
2981
2982 /*
2983 * Host GDTR and IDTR.
2984 */
2985 RTGDTR Gdtr;
2986 RTIDTR Idtr;
2987 RT_ZERO(Gdtr);
2988 RT_ZERO(Idtr);
2989 ASMGetGDTR(&Gdtr);
2990 ASMGetIDTR(&Idtr);
2991 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt);
2992 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt);
2993 AssertRCReturn(rc, rc);
2994
2995#if HC_ARCH_BITS == 64
2996 /*
2997 * Determine if we need to manually restore the GDTR and IDTR limits as VT-x zaps
2998 * them to the maximum limit (0xffff) on every VM-exit.
2999 */
3000 if (Gdtr.cbGdt != 0xffff)
3001 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;
3002
3003 /*
3004 * IDT limit is effectively capped at 0xfff. (See Intel spec. 6.14.1 "64-Bit Mode IDT" and
3005 * Intel spec. 6.2 "Exception and Interrupt Vectors".) Therefore if the host has the limit
3006 * as 0xfff, VT-x bloating the limit to 0xffff shouldn't cause any different CPU behavior.
3007 * However, several hosts either insist on 0xfff being the limit (Windows Patch Guard) or
3008 * use the limit for other purposes (darwin puts the CPU ID in there but botches sidt
3009 * alignment in at least one consumer). So, we're only allowing the IDTR.LIMIT to be left
3010 * at 0xffff on hosts where we are sure it won't cause trouble.
3011 */
3012# if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
3013 if (Idtr.cbIdt < 0x0fff)
3014# else
3015 if (Idtr.cbIdt != 0xffff)
3016# endif
3017 {
3018 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
3019 AssertCompile(sizeof(Idtr) == sizeof(X86XDTR64));
3020 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostIdtr, &Idtr, sizeof(X86XDTR64));
3021 }
3022#endif
3023
3024 /*
3025 * Host TR base. Verify that TR selector doesn't point past the GDT. Masking off the TI
3026 * and RPL bits is effectively what the CPU does for "scaling by 8". TI is always 0 and
3027 * RPL should be too in most cases.
3028 */
3029 AssertMsgReturn((uSelTR | X86_SEL_RPL_LDT) <= Gdtr.cbGdt,
3030 ("TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt), VERR_VMX_INVALID_HOST_STATE);
3031
3032 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
3033#if HC_ARCH_BITS == 64
3034 uintptr_t uTRBase = X86DESC64_BASE(pDesc);
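/* In 64-bit mode the TSS descriptor occupies 16 bytes; X86DESC64_BASE() assembles the base from
   the base fields in the first 8 bytes plus base bits 63:32 stored in the following 8 bytes. */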
3035
3036 /*
3037 * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on
3038 * all VM-exits. The type is the same for 64-bit busy TSS[1]. The limit needs manual
3039 * restoration if the host has something else. Task switching is not supported in 64-bit
3040 * mode[2], but the limit still matters as IOPM is supported in 64-bit mode. Restoring the
3041 * limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0.
3042 *
3043 * [1] See Intel spec. 3.5 "System Descriptor Types".
3044 * [2] See Intel spec. 7.2.3 "TSS Descriptor in 64-bit mode".
3045 */
3046 PVM pVM = pVCpu->CTX_SUFF(pVM);
3047 Assert(pDesc->System.u4Type == 11);
3048 if ( pDesc->System.u16LimitLow != 0x67
3049 || pDesc->System.u4LimitHigh)
3050 {
3051 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_TR;
3052 /* If the host has made GDT read-only, we would need to temporarily toggle CR0.WP before writing the GDT. */
3053 if (pVM->hm.s.fHostKernelFeatures & SUPKERNELFEATURES_GDT_READ_ONLY)
3054 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDT_READ_ONLY;
3055 pVCpu->hm.s.vmx.RestoreHost.uHostSelTR = uSelTR;
3056 }
3057
3058 /*
3059 * Store the GDTR as we need it when restoring the GDT and while restoring the TR.
3060 */
3061 if (pVCpu->hm.s.vmx.fRestoreHostFlags & (VMX_RESTORE_HOST_GDTR | VMX_RESTORE_HOST_SEL_TR))
3062 {
3063 AssertCompile(sizeof(Gdtr) == sizeof(X86XDTR64));
3064 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
3065 if (pVM->hm.s.fHostKernelFeatures & SUPKERNELFEATURES_GDT_NEED_WRITABLE)
3066 {
3067 /* The GDT is read-only but the writable GDT is available. */
3068 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDT_NEED_WRITABLE;
3069 pVCpu->hm.s.vmx.RestoreHost.HostGdtrRw.cb = Gdtr.cbGdt;
3070 rc = SUPR0GetCurrentGdtRw(&pVCpu->hm.s.vmx.RestoreHost.HostGdtrRw.uAddr);
3071 AssertRCReturn(rc, rc);
3072 }
3073 }
3074#else
3075 uintptr_t uTRBase = X86DESC_BASE(pDesc);
3076#endif
3077 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
3078 AssertRCReturn(rc, rc);
3079
3080 /*
3081 * Host FS base and GS base.
3082 */
3083#if HC_ARCH_BITS == 64
3084 uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
3085 uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
3086 rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase);
3087 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase);
3088 AssertRCReturn(rc, rc);
3089
3090 /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */
3091 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS)
3092 pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = u64FSBase;
3093 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_GS)
3094 pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase;
3095#endif
3096 return VINF_SUCCESS;
3097}
3098
3099
3100/**
3101 * Exports certain host MSRs in the VM-exit MSR-load area and some in the
3102 * host-state area of the VMCS.
3103 *
3104 * These MSRs will be automatically restored on the host after every successful
3105 * VM-exit.
3106 *
3107 * @returns VBox status code.
3108 * @param pVCpu The cross context virtual CPU structure.
3109 *
3110 * @remarks No-long-jump zone!!!
3111 */
3112static int hmR0VmxExportHostMsrs(PVMCPU pVCpu)
3113{
3114 AssertPtr(pVCpu);
3115 AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
3116
3117 /*
3118 * Save MSRs that we restore lazily (due to preemption or transition to ring-3)
3119 * rather than swapping them on every VM-entry.
3120 */
3121 hmR0VmxLazySaveHostMsrs(pVCpu);
3122
3123 /*
3124 * Host Sysenter MSRs.
3125 */
3126 int rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
3127#if HC_ARCH_BITS == 32
3128 rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
3129 rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
3130#else
3131 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
3132 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
3133#endif
3134 AssertRCReturn(rc, rc);
3135
3136 /*
3137 * Host EFER MSR.
3138 *
3139 * If the CPU supports the newer VMCS controls for managing EFER, use them. Otherwise it's
3140 * done as part of the auto-load/store MSR area in the VMCS, see hmR0VmxExportGuestMsrs().
3141 */
3142 PVM pVM = pVCpu->CTX_SUFF(pVM);
3143 if (pVM->hm.s.vmx.fSupportsVmcsEfer)
3144 {
3145 rc = VMXWriteVmcs64(VMX_VMCS64_HOST_EFER_FULL, pVM->hm.s.vmx.u64HostEfer);
3146 AssertRCReturn(rc, rc);
3147 }
3148
3149 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see hmR0VmxExportGuestExitCtls(). */
3150
3151 return VINF_SUCCESS;
3152}
3153
3154
3155/**
3156 * Figures out if we need to swap the EFER MSR which is particularly expensive.
3157 *
3158 * We check all relevant bits. For now, that's everything besides LMA/LME, as
3159 * these two bits are handled by VM-entry, see hmR0VmxExportGuestExitCtls() and
3160 * hmR0VmxExportGuestEntryCtls().
3161 *
3162 * @returns true if we need to load guest EFER, false otherwise.
3163 * @param pVCpu The cross context virtual CPU structure.
3164 *
3165 * @remarks Requires EFER, CR4.
3166 * @remarks No-long-jump zone!!!
3167 */
3168static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu)
3169{
3170#ifdef HMVMX_ALWAYS_SWAP_EFER
3171 RT_NOREF(pVCpu);
3172 return true;
3173#else
3174
3175 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3176#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
3177 /* For 32-bit hosts running 64-bit guests, we always swap EFER in the world-switcher. Nothing to do here. */
3178 if (CPUMIsGuestInLongModeEx(pCtx))
3179 return false;
3180#endif
3181
3182 PVM pVM = pVCpu->CTX_SUFF(pVM);
3183 uint64_t const u64HostEfer = pVM->hm.s.vmx.u64HostEfer;
3184 uint64_t const u64GuestEfer = pCtx->msrEFER;
3185
3186 /*
3187 * For 64-bit guests, if EFER.SCE bit differs, we need to swap EFER to ensure that the
3188 * guest's SYSCALL behaviour isn't broken, see @bugref{7386}.
3189 */
3190 if ( CPUMIsGuestInLongModeEx(pCtx)
3191 && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE))
3192 {
3193 return true;
3194 }
3195
3196 /*
3197 * If the guest uses PAE and EFER.NXE bit differs, we need to swap EFER as it
3198 * affects guest paging. 64-bit paging implies CR4.PAE as well.
3199 * See Intel spec. 4.5 "IA-32e Paging" and Intel spec. 4.1.1 "Three Paging Modes".
3200 */
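/* For instance, a PAE guest that sets EFER.NXE and uses the XD bit in its page tables would see
   unexpected reserved-bit page faults if it were run with an EFER where NXE is clear, hence the
   swap in this case. */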
3201 if ( (pCtx->cr4 & X86_CR4_PAE)
3202 && (pCtx->cr0 & X86_CR0_PG)
3203 && (u64GuestEfer & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE))
3204 {
3205 /* Assert that host is NX capable. */
3206 Assert(pVCpu->CTX_SUFF(pVM)->cpum.ro.HostFeatures.fNoExecute);
3207 return true;
3208 }
3209
3210 return false;
3211#endif
3212}
3213
3214
3215/**
3216 * Exports the guest state with appropriate VM-entry controls in the VMCS.
3217 *
3218 * These controls can affect things done on VM-exit; e.g. "load debug controls",
3219 * see Intel spec. 24.8.1 "VM-entry controls".
3220 *
3221 * @returns VBox status code.
3222 * @param pVCpu The cross context virtual CPU structure.
3223 *
3224 * @remarks Requires EFER.
3225 * @remarks No-long-jump zone!!!
3226 */
3227static int hmR0VmxExportGuestEntryCtls(PVMCPU pVCpu)
3228{
3229 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_CTLS)
3230 {
3231 PVM pVM = pVCpu->CTX_SUFF(pVM);
3232 uint32_t fVal = pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
3233 uint32_t const fZap = pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
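/* Bits set in allowed0 must be 1 and bits clear in allowed1 must be 0; starting fVal at allowed0
   enforces the former, while the (fVal & fZap) != fVal check below catches any control we want
   that the CPU does not permit. */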
3234
3235 /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supported the 1-setting of this bit. */
3236 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
3237
3238 /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
3239 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
3240 {
3241 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
3242 Log4Func(("VMX_ENTRY_CTLS_IA32E_MODE_GUEST\n"));
3243 }
3244 else
3245 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
3246
3247 /* If the CPU supports the newer VMCS controls for managing guest/host EFER, use it. */
3248 if ( pVM->hm.s.vmx.fSupportsVmcsEfer
3249 && hmR0VmxShouldSwapEferMsr(pVCpu))
3250 {
3251 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
3252 Log4Func(("VMX_ENTRY_CTLS_LOAD_EFER_MSR\n"));
3253 }
3254
3255 /*
3256 * The following should -not- be set (since we're not in SMM mode):
3257 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
3258 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
3259 */
3260
3261 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
3262 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
3263
3264 if ((fVal & fZap) != fVal)
3265 {
3266 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
3267 pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed0, fVal, fZap));
3268 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;
3269 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3270 }
3271
3272 /* Commit it to the VMCS and update our cache. */
3273 if (pVCpu->hm.s.vmx.Ctls.u32EntryCtls != fVal)
3274 {
3275 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, fVal);
3276 AssertRCReturn(rc, rc);
3277 pVCpu->hm.s.vmx.Ctls.u32EntryCtls = fVal;
3278 }
3279
3280 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_ENTRY_CTLS);
3281 }
3282 return VINF_SUCCESS;
3283}
3284
3285
3286/**
3287 * Exports the guest state with appropriate VM-exit controls in the VMCS.
3288 *
3289 * @returns VBox status code.
3290 * @param pVCpu The cross context virtual CPU structure.
3291 *
3292 * @remarks Requires EFER.
3293 */
3294static int hmR0VmxExportGuestExitCtls(PVMCPU pVCpu)
3295{
3296 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_EXIT_CTLS)
3297 {
3298 PVM pVM = pVCpu->CTX_SUFF(pVM);
3299 uint32_t fVal = pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
3300 uint32_t const fZap = pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
3301
3302 /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
3303 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
3304
3305 /*
3306 * Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary.
3307 * On VM-exit, VT-x sets both the host EFER.LMA and EFER.LME bits to this value. See assertion in
3308 * hmR0VmxExportHostMsrs().
3309 */
3310#if HC_ARCH_BITS == 64
3311 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
3312 Log4Func(("VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE\n"));
3313#else
3314 Assert( pVCpu->hm.s.vmx.pfnStartVM == VMXR0SwitcherStartVM64
3315 || pVCpu->hm.s.vmx.pfnStartVM == VMXR0StartVM32);
3316 /* Set the host address-space size based on the switcher, not guest state. See @bugref{8432}. */
3317 if (pVCpu->hm.s.vmx.pfnStartVM == VMXR0SwitcherStartVM64)
3318 {
3319 /* The switcher returns to long mode, EFER is managed by the switcher. */
3320 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
3321 Log4Func(("VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE\n"));
3322 }
3323 else
3324 Assert(!(fVal & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE));
3325#endif
3326
3327 /* If the newer VMCS fields for managing EFER exist, use them. */
3328 if ( pVM->hm.s.vmx.fSupportsVmcsEfer
3329 && hmR0VmxShouldSwapEferMsr(pVCpu))
3330 {
3331 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
3332 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
3333 Log4Func(("VMX_EXIT_CTLS_SAVE_EFER_MSR and VMX_EXIT_CTLS_LOAD_EFER_MSR\n"));
3334 }
3335
3336 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
3337 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
3338
3339 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
3340 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
3341 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
3342
3343 /* Enable saving of the VMX preemption timer value on VM-exit. */
3344 if ( pVM->hm.s.vmx.fUsePreemptTimer
3345 && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
3346 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
3347
3348 if ((fVal & fZap) != fVal)
3349 {
3350 LogRelFunc(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
3351 pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed0, fVal, fZap));
3352 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
3353 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3354 }
3355
3356 /* Commit it to the VMCS and update our cache. */
3357 if (pVCpu->hm.s.vmx.Ctls.u32ExitCtls != fVal)
3358 {
3359 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, fVal);
3360 AssertRCReturn(rc, rc);
3361 pVCpu->hm.s.vmx.Ctls.u32ExitCtls = fVal;
3362 }
3363
3364 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_EXIT_CTLS);
3365 }
3366 return VINF_SUCCESS;
3367}
3368
3369
3370/**
3371 * Sets the TPR threshold in the VMCS.
3372 *
3373 * @returns VBox status code.
3374 * @param pVCpu The cross context virtual CPU structure.
3375 * @param u32TprThreshold The TPR threshold (task-priority class only).
3376 */
3377DECLINLINE(int) hmR0VmxApicSetTprThreshold(PVMCPU pVCpu, uint32_t u32TprThreshold)
3378{
3379 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
3380 Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW); RT_NOREF_PV(pVCpu);
3381 return VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
3382}
3383
3384
3385/**
3386 * Exports the guest APIC TPR state into the VMCS.
3387 *
3388 * @returns VBox status code.
3389 * @param pVCpu The cross context virtual CPU structure.
3390 *
3391 * @remarks No-long-jump zone!!!
3392 */
3393static int hmR0VmxExportGuestApicTpr(PVMCPU pVCpu)
3394{
3395 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
3396 {
3397 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
3398
3399 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
3400 && APICIsEnabled(pVCpu))
3401 {
3402 /*
3403 * Setup TPR shadowing.
3404 */
3405 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
3406 {
3407 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
3408
3409 bool fPendingIntr = false;
3410 uint8_t u8Tpr = 0;
3411 uint8_t u8PendingIntr = 0;
3412 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
3413 AssertRCReturn(rc, rc);
3414
3415 /*
3416 * If there are interrupts pending but masked by the TPR, instruct VT-x to
3417 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
3418 * priority of the pending interrupt so we can deliver the interrupt. If there
3419 * are no interrupts pending, set threshold to 0 to not cause any
3420 * TPR-below-threshold VM-exits.
3421 */
3422 pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR] = u8Tpr;
3423 uint32_t u32TprThreshold = 0;
3424 if (fPendingIntr)
3425 {
3426 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR (which is the Task-Priority Class). */
3427 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
3428 const uint8_t u8TprPriority = u8Tpr >> 4;
3429 if (u8PendingPriority <= u8TprPriority)
3430 u32TprThreshold = u8PendingPriority;
3431 }
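/* Example with illustrative values: a pending vector of 0x51 (class 5) while the guest TPR is
   0x62 (class 6) yields a threshold of 5, so the first TPR write that drops the class below 5
   causes a TPR-below-threshold VM-exit and lets us deliver the interrupt. */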
3432
3433 rc = hmR0VmxApicSetTprThreshold(pVCpu, u32TprThreshold);
3434 AssertRCReturn(rc, rc);
3435 }
3436 }
3437 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
3438 }
3439 return VINF_SUCCESS;
3440}
3441
3442
3443/**
3444 * Gets the guest's interruptibility-state ("interrupt shadow" as AMD calls it).
3445 *
3446 * @returns Guest's interruptibility-state.
3447 * @param pVCpu The cross context virtual CPU structure.
3448 *
3449 * @remarks No-long-jump zone!!!
3450 */
3451static uint32_t hmR0VmxGetGuestIntrState(PVMCPU pVCpu)
3452{
3453 /*
3454 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
3455 */
3456 uint32_t fIntrState = 0;
3457 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3458 {
3459 /* If inhibition is active, RIP & RFLAGS should've been accessed
3460 (i.e. read previously from the VMCS or from ring-3). */
3461 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3462#ifdef VBOX_STRICT
3463 uint64_t const fExtrn = ASMAtomicUoReadU64(&pCtx->fExtrn);
3464 AssertMsg(!(fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)), ("%#x\n", fExtrn));
3465#endif
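/* Only a single inhibition flag is tracked, so the flavour is inferred from IF below: STI
   blocking is reported when IF is set (STI only inhibits when it actually sets IF), MOV-SS
   blocking otherwise. */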
3466 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
3467 {
3468 if (pCtx->eflags.Bits.u1IF)
3469 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
3470 else
3471 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
3472 }
3473 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3474 {
3475 /*
3476 * We can clear the inhibit force flag as even if we go back to the recompiler
3477 * without executing guest code in VT-x, the flag's condition to be cleared is
3478 * met and thus the cleared state is correct.
3479 */
3480 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3481 }
3482 }
3483
3484 /*
3485 * NMIs to the guest are blocked after an NMI is injected until the guest executes an IRET. We only
3486 * bother with virtual-NMI blocking when we have support for virtual NMIs in the CPU, otherwise
3487 * setting this would block host NMIs and IRET would not clear the blocking.
3488 *
3489 * See Intel spec. 26.6.1 "Interruptibility state". See @bugref{7445}.
3490 */
3491 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)
3492 && (pVCpu->hm.s.vmx.Ctls.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
3493 {
3494 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
3495 }
3496
3497 return fIntrState;
3498}
3499
3500
3501/**
3502 * Exports the exception intercepts required for guest execution in the VMCS.
3503 *
3504 * @returns VBox status code.
3505 * @param pVCpu The cross context virtual CPU structure.
3506 *
3507 * @remarks No-long-jump zone!!!
3508 */
3509static int hmR0VmxExportGuestXcptIntercepts(PVMCPU pVCpu)
3510{
3511 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS)
3512 {
3513 uint32_t uXcptBitmap = pVCpu->hm.s.vmx.Ctls.u32XcptBitmap;
3514
3515 /* The remaining exception intercepts are handled elsewhere, e.g. in hmR0VmxExportGuestCR0(). */
3516 if (pVCpu->hm.s.fGIMTrapXcptUD)
3517 uXcptBitmap |= RT_BIT(X86_XCPT_UD);
3518#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3519 else
3520 uXcptBitmap &= ~RT_BIT(X86_XCPT_UD);
3521#endif
3522
3523 Assert(uXcptBitmap & RT_BIT_32(X86_XCPT_AC));
3524 Assert(uXcptBitmap & RT_BIT_32(X86_XCPT_DB));
3525
3526 if (uXcptBitmap != pVCpu->hm.s.vmx.Ctls.u32XcptBitmap)
3527 {
3528 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
3529 AssertRCReturn(rc, rc);
3530 pVCpu->hm.s.vmx.Ctls.u32XcptBitmap = uXcptBitmap;
3531 }
3532
3533 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS);
3534 Log4Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP=%#RX32\n", uXcptBitmap));
3535 }
3536 return VINF_SUCCESS;
3537}
3538
3539
3540/**
3541 * Exports the guest's RIP into the guest-state area in the VMCS.
3542 *
3543 * @returns VBox status code.
3544 * @param pVCpu The cross context virtual CPU structure.
3545 *
3546 * @remarks No-long-jump zone!!!
3547 */
3548static int hmR0VmxExportGuestRip(PVMCPU pVCpu)
3549{
3550 int rc = VINF_SUCCESS;
3551 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RIP)
3552 {
3553 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
3554
3555 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
3556 AssertRCReturn(rc, rc);
3557
3558 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RIP);
3559 Log4Func(("RIP=%#RX64\n", pVCpu->cpum.GstCtx.rip));
3560 }
3561 return rc;
3562}
3563
3564
3565/**
3566 * Exports the guest's RSP into the guest-state area in the VMCS.
3567 *
3568 * @returns VBox status code.
3569 * @param pVCpu The cross context virtual CPU structure.
3570 *
3571 * @remarks No-long-jump zone!!!
3572 */
3573static int hmR0VmxExportGuestRsp(PVMCPU pVCpu)
3574{
3575 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RSP)
3576 {
3577 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RSP);
3578
3579 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pVCpu->cpum.GstCtx.rsp);
3580 AssertRCReturn(rc, rc);
3581
3582 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RSP);
3583 }
3584 return VINF_SUCCESS;
3585}
3586
3587
3588/**
3589 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
3590 *
3591 * @returns VBox status code.
3592 * @param pVCpu The cross context virtual CPU structure.
3593 *
3594 * @remarks No-long-jump zone!!!
3595 */
3596static int hmR0VmxExportGuestRflags(PVMCPU pVCpu)
3597{
3598 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
3599 {
3600 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
3601
3602 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
3603 Let us assert it as such and use 32-bit VMWRITE. */
3604 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
3605 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
3606 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
3607 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
3608
3609 /*
3610 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
3611 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
3612 * can run the real-mode guest code under Virtual 8086 mode.
3613 */
3614 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3615 {
3616 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
3617 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
3618 pVCpu->hm.s.vmx.RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
3619 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
3620 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
3621 }
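/* E.g. real-mode guest EFLAGS 0x00000202 is stashed in RealMode.Eflags while VT-x is handed
   0x00020202 (VM=1, IOPL=0) so that IOPL-sensitive instructions fault and can be emulated. */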
3622
3623 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
3624 AssertRCReturn(rc, rc);
3625
3626 /*
3627 * Setup pending debug exceptions if the guest is single-stepping using EFLAGS.TF.
3628 *
3629 * We must avoid setting any automatic debug exceptions delivery when single-stepping
3630 * through the hypervisor debugger using EFLAGS.TF.
3631 */
3632 if ( !pVCpu->hm.s.fSingleInstruction
3633 && fEFlags.Bits.u1TF)
3634 {
3635 /** @todo r=ramshankar: Warning! We ASSUME EFLAGS.TF will not be cleared on
3636 * premature trips to ring-3, esp. since IEM does not yet handle it. */
3637 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS);
3638 AssertRCReturn(rc, rc);
3639 }
3640
3641 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
3642 Log4Func(("EFlags=%#RX32\n", fEFlags.u32));
3643 }
3644 return VINF_SUCCESS;
3645}
3646
3647
3648/**
3649 * Exports the guest CR0 control register into the guest-state area in the VMCS.
3650 *
3651 * The guest FPU state is always pre-loaded, hence we don't need to bother with
3652 * sharing FPU-related CR0 bits between the guest and host.
3653 *
3654 * @returns VBox status code.
3655 * @param pVCpu The cross context virtual CPU structure.
3656 *
3657 * @remarks No-long-jump zone!!!
3658 */
3659static int hmR0VmxExportGuestCR0(PVMCPU pVCpu)
3660{
3661 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR0)
3662 {
3663 PVM pVM = pVCpu->CTX_SUFF(pVM);
3664 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
3665 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.cr0));
3666
3667 uint32_t const u32ShadowCr0 = pVCpu->cpum.GstCtx.cr0;
3668 uint32_t u32GuestCr0 = pVCpu->cpum.GstCtx.cr0;
3669
3670 /*
3671 * Setup VT-x's view of the guest CR0.
3672 * Minimize VM-exits due to CR3 changes when we have NestedPaging.
3673 */
3674 uint32_t uProcCtls = pVCpu->hm.s.vmx.Ctls.u32ProcCtls;
3675 if (pVM->hm.s.fNestedPaging)
3676 {
3677 if (CPUMIsGuestPagingEnabled(pVCpu))
3678 {
3679 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
3680 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
3681 | VMX_PROC_CTLS_CR3_STORE_EXIT);
3682 }
3683 else
3684 {
3685 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
3686 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
3687 | VMX_PROC_CTLS_CR3_STORE_EXIT;
3688 }
3689
3690 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
3691 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3692 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
3693 }
3694 else
3695 {
3696 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
3697 u32GuestCr0 |= X86_CR0_WP;
3698 }
3699
3700 /*
3701 * Guest FPU bits.
3702 *
3703 * Since we always pre-load the guest FPU before VM-entry, there is no need to track lazy state
3704 * using CR0.TS.
3705 *
3706 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
3707 * set on the first CPUs to support VT-x; it makes no mention of UX with regards to the VM-entry checks.
3708 */
3709 u32GuestCr0 |= X86_CR0_NE;
3710
3711 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
3712 bool const fInterceptMF = !(u32ShadowCr0 & X86_CR0_NE);
3713
3714 /*
3715 * Update exception intercepts.
3716 */
3717 uint32_t uXcptBitmap = pVCpu->hm.s.vmx.Ctls.u32XcptBitmap;
3718 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3719 {
3720 Assert(PDMVmmDevHeapIsEnabled(pVM));
3721 Assert(pVM->hm.s.vmx.pRealModeTSS);
3722 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
3723 }
3724 else
3725 {
3726 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
3727 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
3728 if (fInterceptMF)
3729 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
3730 }
3731
3732 /* Additional intercepts for debugging, define these yourself explicitly. */
3733#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3734 uXcptBitmap |= 0
3735 | RT_BIT(X86_XCPT_BP)
3736 | RT_BIT(X86_XCPT_DE)
3737 | RT_BIT(X86_XCPT_NM)
3738 | RT_BIT(X86_XCPT_TS)
3739 | RT_BIT(X86_XCPT_UD)
3740 | RT_BIT(X86_XCPT_NP)
3741 | RT_BIT(X86_XCPT_SS)
3742 | RT_BIT(X86_XCPT_GP)
3743 | RT_BIT(X86_XCPT_PF)
3744 | RT_BIT(X86_XCPT_MF)
3745 ;
3746#elif defined(HMVMX_ALWAYS_TRAP_PF)
3747 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
3748#endif
3749 if (pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv)
3750 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
3751 Assert(pVM->hm.s.fNestedPaging || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
3752
3753 /*
3754 * Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW).
3755 */
3756 uint32_t fSetCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
3757 uint32_t fZapCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
3758 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
3759 fSetCr0 &= ~(X86_CR0_PE | X86_CR0_PG);
3760 else
3761 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
3762
3763 u32GuestCr0 |= fSetCr0;
3764 u32GuestCr0 &= fZapCr0;
3765 u32GuestCr0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */
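/* On typical hardware CR0_FIXED0 is 0x80000021 and CR0_FIXED1 is 0xffffffff, i.e. fSetCr0
   forces PE, NE and PG on (PE/PG relaxed above for unrestricted guests) while fZapCr0 forces
   nothing off. */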
3766
3767 /*
3768 * CR0 is shared between host and guest along with a CR0 read shadow. Therefore, certain bits must not be changed
3769 * by the guest because VT-x ignores saving/restoring them (namely CD, ET, NW) and for certain other bits
3770 * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables).
3771 */
3772 uint32_t u32Cr0Mask = X86_CR0_PE
3773 | X86_CR0_NE
3774 | (pVM->hm.s.fNestedPaging ? 0 : X86_CR0_WP)
3775 | X86_CR0_PG
3776 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
3777 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
3778 | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
3779
3780 /** @todo Avoid intercepting CR0.PE with unrestricted guests. Fix PGM
3781 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
3782 * and @bugref{6944}. */
3783#if 0
3784 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3785 u32Cr0Mask &= ~X86_CR0_PE;
3786#endif
3787 /*
3788 * Finally, update VMCS fields with the CR0 values and the exception bitmap.
3789 */
3790 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCr0);
3791 rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32ShadowCr0);
3792 if (u32Cr0Mask != pVCpu->hm.s.vmx.Ctls.u32Cr0Mask)
3793 rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32Cr0Mask);
3794 if (uProcCtls != pVCpu->hm.s.vmx.Ctls.u32ProcCtls)
3795 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
3796 if (uXcptBitmap != pVCpu->hm.s.vmx.Ctls.u32XcptBitmap)
3797 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
3798 AssertRCReturn(rc, rc);
3799
3800 /* Update our caches. */
3801 pVCpu->hm.s.vmx.Ctls.u32Cr0Mask = u32Cr0Mask;
3802 pVCpu->hm.s.vmx.Ctls.u32ProcCtls = uProcCtls;
3803 pVCpu->hm.s.vmx.Ctls.u32XcptBitmap = uXcptBitmap;
3804
3805 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR0);
3806
3807 Log4Func(("u32Cr0Mask=%#RX32 u32ShadowCr0=%#RX32 u32GuestCr0=%#RX32 (fSetCr0=%#RX32 fZapCr0=%#RX32)\n", u32Cr0Mask,
3808 u32ShadowCr0, u32GuestCr0, fSetCr0, fZapCr0));
3809 }
3810
3811 return VINF_SUCCESS;
3812}
3813
3814
3815/**
3816 * Exports the guest control registers (CR3, CR4) into the guest-state area
3817 * in the VMCS.
3818 *
3819 * @returns VBox strict status code.
3820 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
3821 * without unrestricted guest access and the VMMDev is not presently
3822 * mapped (e.g. EFI32).
3823 *
3824 * @param pVCpu The cross context virtual CPU structure.
3825 *
3826 * @remarks No-long-jump zone!!!
3827 */
3828static VBOXSTRICTRC hmR0VmxExportGuestCR3AndCR4(PVMCPU pVCpu)
3829{
3830 int rc = VINF_SUCCESS;
3831 PVM pVM = pVCpu->CTX_SUFF(pVM);
3832
3833 /*
3834 * Guest CR2.
3835 * It's always loaded in the assembler code. Nothing to do here.
3836 */
3837
3838 /*
3839 * Guest CR3.
3840 */
3841 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR3)
3842 {
3843 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
3844
3845 RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
3846 if (pVM->hm.s.fNestedPaging)
3847 {
3848 pVCpu->hm.s.vmx.HCPhysEPTP = PGMGetHyperCR3(pVCpu);
3849
3850 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
3851 Assert(pVCpu->hm.s.vmx.HCPhysEPTP);
3852 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & UINT64_C(0xfff0000000000000)));
3853 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & 0xfff));
3854
3855 /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
3856 pVCpu->hm.s.vmx.HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
3857 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
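/* With write-back memtype (6) and a 4-level walk (walk-length field 3, shifted by 3) this sets
   the low EPTP bits to 0x1e, matching the checks below. */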
3858
3859 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
3860 AssertMsg( ((pVCpu->hm.s.vmx.HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
3861 && ((pVCpu->hm.s.vmx.HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
3862 ("EPTP %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3863 AssertMsg( !((pVCpu->hm.s.vmx.HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
3864 || (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EPT_ACCESS_DIRTY),
3865 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3866
3867 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
3868 AssertRCReturn(rc, rc);
3869
3870 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3871 if ( pVM->hm.s.vmx.fUnrestrictedGuest
3872 || CPUMIsGuestPagingEnabledEx(pCtx))
3873 {
3874 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
3875 if (CPUMIsGuestInPAEModeEx(pCtx))
3876 {
3877 rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
3878 AssertRCReturn(rc, rc);
3879 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u);
3880 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u);
3881 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u);
3882 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u);
3883 AssertRCReturn(rc, rc);
3884 }
3885
3886 /*
3887 * With nested paging, the guest's view of its CR3 is left untouched when the guest
3888 * is using paging, or when we have unrestricted guest execution to handle the case
3889 * where it is not using paging.
3890 */
3891 GCPhysGuestCR3 = pCtx->cr3;
3892 }
3893 else
3894 {
3895 /*
3896 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
3897 * thinks it accesses physical memory directly, we use our identity-mapped
3898 * page table to map guest-linear to guest-physical addresses. EPT takes care
3899 * of translating it to host-physical addresses.
3900 */
3901 RTGCPHYS GCPhys;
3902 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
3903
3904 /* We obtain it here every time as the guest could have relocated this PCI region. */
3905 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
3906 if (RT_SUCCESS(rc))
3907 { /* likely */ }
3908 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
3909 {
3910 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
3911 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
3912 }
3913 else
3914 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
3915
3916 GCPhysGuestCR3 = GCPhys;
3917 }
3918
3919 Log4Func(("u32GuestCr3=%#RGp (GstN)\n", GCPhysGuestCR3));
3920 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
3921 AssertRCReturn(rc, rc);
3922 }
3923 else
3924 {
3925 /* Non-nested paging case, just use the hypervisor's CR3. */
3926 RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
3927
3928 Log4Func(("u32GuestCr3=%#RHv (HstN)\n", HCPhysGuestCR3));
3929 rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3);
3930 AssertRCReturn(rc, rc);
3931 }
3932
3933 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR3);
3934 }
3935
3936 /*
3937 * Guest CR4.
3938 * ASSUMES this is done every time we get in from ring-3! (XCR0)
3939 */
3940 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR4)
3941 {
3942 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3943 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
3944 Assert(!RT_HI_U32(pCtx->cr4));
3945
3946 uint32_t u32GuestCr4 = pCtx->cr4;
3947 uint32_t const u32ShadowCr4 = pCtx->cr4;
3948
3949 /*
3950 * Setup VT-x's view of the guest CR4.
3951 *
3952 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
3953 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
3954 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
3955 *
3956 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
3957 */
3958 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3959 {
3960 Assert(pVM->hm.s.vmx.pRealModeTSS);
3961 Assert(PDMVmmDevHeapIsEnabled(pVM));
3962 u32GuestCr4 &= ~X86_CR4_VME;
3963 }
3964
3965 if (pVM->hm.s.fNestedPaging)
3966 {
3967 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
3968 && !pVM->hm.s.vmx.fUnrestrictedGuest)
3969 {
3970 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
3971 u32GuestCr4 |= X86_CR4_PSE;
3972 /* Our identity mapping is a 32-bit page directory. */
3973 u32GuestCr4 &= ~X86_CR4_PAE;
3974 }
3975 /* else use guest CR4.*/
3976 }
3977 else
3978 {
3979 /*
3980 * The shadow and guest paging modes differ: the shadow follows the host paging mode, so we
3981 * need to adjust VT-x's view of CR4 depending on our shadow page tables.
3982 */
3983 switch (pVCpu->hm.s.enmShadowMode)
3984 {
3985 case PGMMODE_REAL: /* Real-mode. */
3986 case PGMMODE_PROTECTED: /* Protected mode without paging. */
3987 case PGMMODE_32_BIT: /* 32-bit paging. */
3988 {
3989 u32GuestCr4 &= ~X86_CR4_PAE;
3990 break;
3991 }
3992
3993 case PGMMODE_PAE: /* PAE paging. */
3994 case PGMMODE_PAE_NX: /* PAE paging with NX. */
3995 {
3996 u32GuestCr4 |= X86_CR4_PAE;
3997 break;
3998 }
3999
4000 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
4001 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
4002#ifdef VBOX_ENABLE_64_BITS_GUESTS
4003 break;
4004#endif
4005 default:
4006 AssertFailed();
4007 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
4008 }
4009 }
4010
4011 /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
4012 uint64_t const fSetCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
4013 uint64_t const fZapCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
4014 u32GuestCr4 |= fSetCr4;
4015 u32GuestCr4 &= fZapCr4;
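/* CR4_FIXED0 typically mandates only X86_CR4_VMXE, so the main effect here is forcing VMXE on in
   the CR4 the guest runs with; the guest keeps seeing its own value through the read shadow for
   the bits covered by the CR4 mask set up below. */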
4016
4017 /* Set up the CR4 mask. These are the CR4 bits owned by the host; if the guest attempts to
4018 change them, a VM-exit is caused. */
4019 uint32_t u32Cr4Mask = X86_CR4_VME
4020 | X86_CR4_PAE
4021 | X86_CR4_PGE
4022 | X86_CR4_PSE
4023 | X86_CR4_VMXE;
4024 if (pVM->cpum.ro.HostFeatures.fXSaveRstor)
4025 u32Cr4Mask |= X86_CR4_OSXSAVE;
4026 if (pVM->cpum.ro.GuestFeatures.fPcid)
4027 u32Cr4Mask |= X86_CR4_PCIDE;
4028
4029 /* Write VT-x's view of the guest CR4, the CR4 modify mask and the read-only CR4 shadow
4030 into the VMCS and update our cache. */
4031 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCr4);
4032 rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32ShadowCr4);
4033 if (pVCpu->hm.s.vmx.Ctls.u32Cr4Mask != u32Cr4Mask)
4034 rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32Cr4Mask);
4035 AssertRCReturn(rc, rc);
4036 pVCpu->hm.s.vmx.Ctls.u32Cr4Mask = u32Cr4Mask;
4037
4038 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
4039 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
4040
4041 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR4);
4042
4043 Log4Func(("u32GuestCr4=%#RX32 u32ShadowCr4=%#RX32 (fSetCr4=%#RX32 fZapCr4=%#RX32)\n", u32GuestCr4, u32ShadowCr4, fSetCr4,
4044 fZapCr4));
4045 }
4046 return rc;
4047}
4048
4049
4050/**
4051 * Exports the guest debug registers into the guest-state area in the VMCS.
4052 * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3).
4053 *
4054 * This also sets up whether \#DB and MOV DRx accesses cause VM-exits.
4055 *
4056 * @returns VBox status code.
4057 * @param pVCpu The cross context virtual CPU structure.
4058 *
4059 * @remarks No-long-jump zone!!!
4060 */
4061static int hmR0VmxExportSharedDebugState(PVMCPU pVCpu)
4062{
4063 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4064
4065#ifdef VBOX_STRICT
4066 /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
4067 if (pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4068 {
4069 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
4070 Assert((pVCpu->cpum.GstCtx.dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0);
4071 Assert((pVCpu->cpum.GstCtx.dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK);
4072 }
4073#endif
4074
4075 bool fSteppingDB = false;
4076 bool fInterceptMovDRx = false;
4077 uint32_t uProcCtls = pVCpu->hm.s.vmx.Ctls.u32ProcCtls;
4078 if (pVCpu->hm.s.fSingleInstruction)
4079 {
4080 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
4081 PVM pVM = pVCpu->CTX_SUFF(pVM);
4082 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_MONITOR_TRAP_FLAG)
4083 {
4084 uProcCtls |= VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
4085 Assert(fSteppingDB == false);
4086 }
4087 else
4088 {
4089 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_TF;
4090 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_RFLAGS;
4091 pVCpu->hm.s.fClearTrapFlag = true;
4092 fSteppingDB = true;
4093 }
4094 }
4095
4096 uint32_t u32GuestDr7;
4097 if ( fSteppingDB
4098 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
4099 {
4100 /*
4101 * Use the combined guest and host DRx values found in the hypervisor register set
4102 * because the debugger has breakpoints active or someone is single stepping on the
4103 * host side without a monitor trap flag.
4104 *
4105 * Note! DBGF expects a clean DR6 state before executing guest code.
4106 */
4107#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
4108 if ( CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
4109 && !CPUMIsHyperDebugStateActivePending(pVCpu))
4110 {
4111 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
4112 Assert(CPUMIsHyperDebugStateActivePending(pVCpu));
4113 Assert(!CPUMIsGuestDebugStateActivePending(pVCpu));
4114 }
4115 else
4116#endif
4117 if (!CPUMIsHyperDebugStateActive(pVCpu))
4118 {
4119 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
4120 Assert(CPUMIsHyperDebugStateActive(pVCpu));
4121 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
4122 }
4123
4124 /* Update DR7 with the hypervisor value (other DRx registers are handled by CPUM one way or another). */
4125 u32GuestDr7 = (uint32_t)CPUMGetHyperDR7(pVCpu);
4126 pVCpu->hm.s.fUsingHyperDR7 = true;
4127 fInterceptMovDRx = true;
4128 }
4129 else
4130 {
4131 /*
4132 * If the guest has enabled debug registers, we need to load them prior to
4133 * executing guest code so they'll trigger at the right time.
4134 */
4135 if (pVCpu->cpum.GstCtx.dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
4136 {
4137#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
4138 if ( CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
4139 && !CPUMIsGuestDebugStateActivePending(pVCpu))
4140 {
4141 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
4142 Assert(CPUMIsGuestDebugStateActivePending(pVCpu));
4143 Assert(!CPUMIsHyperDebugStateActivePending(pVCpu));
4144 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
4145 }
4146 else
4147#endif
4148 if (!CPUMIsGuestDebugStateActive(pVCpu))
4149 {
4150 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
4151 Assert(CPUMIsGuestDebugStateActive(pVCpu));
4152 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
4153 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
4154 }
4155 Assert(!fInterceptMovDRx);
4156 }
4157 /*
4158 * If no debugging is enabled, we'll lazily load DR0-3. Unlike on AMD-V, we
4159 * must intercept #DB in order to maintain a correct DR6 guest value, and
4160 * because we need to intercept it to prevent nested #DBs from hanging the
4161 * CPU, we end up always having to intercept it. See hmR0VmxInitXcptBitmap.
4162 */
4163#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
4164 else if ( !CPUMIsGuestDebugStateActivePending(pVCpu)
4165 && !CPUMIsGuestDebugStateActive(pVCpu))
4166#else
4167 else if (!CPUMIsGuestDebugStateActive(pVCpu))
4168#endif
4169 {
4170 fInterceptMovDRx = true;
4171 }
4172
4173 /* Update DR7 with the actual guest value. */
4174 u32GuestDr7 = pVCpu->cpum.GstCtx.dr[7];
4175 pVCpu->hm.s.fUsingHyperDR7 = false;
4176 }
4177
4178 if (fInterceptMovDRx)
4179 uProcCtls |= VMX_PROC_CTLS_MOV_DR_EXIT;
4180 else
4181 uProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
4182
4183 /*
4184 * Update the processor-based VM-execution controls with the MOV-DRx intercepts and the
4185 * monitor-trap flag and update our cache.
4186 */
4187 if (uProcCtls != pVCpu->hm.s.vmx.Ctls.u32ProcCtls)
4188 {
4189 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
4190 AssertRCReturn(rc2, rc2);
4191 pVCpu->hm.s.vmx.Ctls.u32ProcCtls = uProcCtls;
4192 }
4193
4194 /*
4195 * Update guest DR7.
4196 */
4197 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, u32GuestDr7);
4198 AssertRCReturn(rc, rc);
4199
4200 /*
4201 * If we have forced EFLAGS.TF to be set because we're single-stepping in the hypervisor debugger,
4202 * we need to clear any interrupt inhibition, as otherwise it causes a VM-entry failure.
4203 *
4204 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
4205 */
4206 if (fSteppingDB)
4207 {
4208 Assert(pVCpu->hm.s.fSingleInstruction);
4209 Assert(pVCpu->cpum.GstCtx.eflags.Bits.u1TF);
4210
4211 uint32_t fIntrState = 0;
4212 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
4213 AssertRCReturn(rc, rc);
4214
4215 if (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
4216 {
4217 fIntrState &= ~(VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
4218 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState);
4219 AssertRCReturn(rc, rc);
4220 }
4221 }
4222
4223 return VINF_SUCCESS;
4224}
4225
4226
4227#ifdef VBOX_STRICT
4228/**
4229 * Strict function to validate segment registers.
4230 *
4231 * @param pVCpu The cross context virtual CPU structure.
4232 *
4233 * @remarks Will import guest CR0 on strict builds during validation of
4234 * segments.
4235 */
4236static void hmR0VmxValidateSegmentRegs(PVMCPU pVCpu)
4237{
4238 /*
4239 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
4240 *
4241 * The reason we check for attribute value 0 in this function and not just the unusable bit is
4242 * that hmR0VmxExportGuestSegmentReg() only updates the VMCS' copy of the value with the unusable bit
4243 * and doesn't change the guest-context value.
4244 */
4245 PVM pVM = pVCpu->CTX_SUFF(pVM);
4246 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4247 hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0);
4248 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
4249 && ( !CPUMIsGuestInRealModeEx(pCtx)
4250 && !CPUMIsGuestInV86ModeEx(pCtx)))
4251 {
4252 /* Protected mode checks */
4253 /* CS */
4254 Assert(pCtx->cs.Attr.n.u1Present);
4255 Assert(!(pCtx->cs.Attr.u & 0xf00));
4256 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
4257 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
4258 || !(pCtx->cs.Attr.n.u1Granularity));
4259 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
4260 || (pCtx->cs.Attr.n.u1Granularity));
4261 /* CS cannot be loaded with NULL in protected mode. */
4262 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
4263 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
4264 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
4265 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
4266 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
4267 else
4268 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
4269 /* SS */
4270 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
4271 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
4272 if ( !(pCtx->cr0 & X86_CR0_PE)
4273 || pCtx->cs.Attr.n.u4Type == 3)
4274 {
4275 Assert(!pCtx->ss.Attr.n.u2Dpl);
4276 }
4277 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
4278 {
4279 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
4280 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
4281 Assert(pCtx->ss.Attr.n.u1Present);
4282 Assert(!(pCtx->ss.Attr.u & 0xf00));
4283 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
4284 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4285 || !(pCtx->ss.Attr.n.u1Granularity));
4286 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
4287 || (pCtx->ss.Attr.n.u1Granularity));
4288 }
4289 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxExportGuestSegmentReg(). */
4290 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4291 {
4292 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4293 Assert(pCtx->ds.Attr.n.u1Present);
4294 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
4295 Assert(!(pCtx->ds.Attr.u & 0xf00));
4296 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
4297 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4298 || !(pCtx->ds.Attr.n.u1Granularity));
4299 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
4300 || (pCtx->ds.Attr.n.u1Granularity));
4301 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4302 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
4303 }
4304 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4305 {
4306 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4307 Assert(pCtx->es.Attr.n.u1Present);
4308 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
4309 Assert(!(pCtx->es.Attr.u & 0xf00));
4310 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
4311 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
4312 || !(pCtx->es.Attr.n.u1Granularity));
4313 Assert( !(pCtx->es.u32Limit & 0xfff00000)
4314 || (pCtx->es.Attr.n.u1Granularity));
4315 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4316 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
4317 }
4318 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
4319 {
4320 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4321 Assert(pCtx->fs.Attr.n.u1Present);
4322 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
4323 Assert(!(pCtx->fs.Attr.u & 0xf00));
4324 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
4325 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
4326 || !(pCtx->fs.Attr.n.u1Granularity));
4327 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
4328 || (pCtx->fs.Attr.n.u1Granularity));
4329 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4330 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
4331 }
4332 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
4333 {
4334 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4335 Assert(pCtx->gs.Attr.n.u1Present);
4336 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
4337 Assert(!(pCtx->gs.Attr.u & 0xf00));
4338 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
4339 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
4340 || !(pCtx->gs.Attr.n.u1Granularity));
4341 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
4342 || (pCtx->gs.Attr.n.u1Granularity));
4343 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4344 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
4345 }
4346 /* 64-bit capable CPUs. */
4347# if HC_ARCH_BITS == 64
4348 Assert(!RT_HI_U32(pCtx->cs.u64Base));
4349 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
4350 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
4351 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
4352# endif
4353 }
4354 else if ( CPUMIsGuestInV86ModeEx(pCtx)
4355 || ( CPUMIsGuestInRealModeEx(pCtx)
4356 && !pVM->hm.s.vmx.fUnrestrictedGuest))
4357 {
4358 /* Real and v86 mode checks. */
4359 /* hmR0VmxExportGuestSegmentReg() writes the modified value into the VMCS. We want what we're feeding to VT-x. */
4360 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
4361 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4362 {
4363 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3; u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
4364 }
4365 else
4366 {
4367 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
4368 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
4369 }
4370
4371 /* CS */
4372 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
4373 Assert(pCtx->cs.u32Limit == 0xffff);
4374 Assert(u32CSAttr == 0xf3);
4375 /* SS */
4376 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
4377 Assert(pCtx->ss.u32Limit == 0xffff);
4378 Assert(u32SSAttr == 0xf3);
4379 /* DS */
4380 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
4381 Assert(pCtx->ds.u32Limit == 0xffff);
4382 Assert(u32DSAttr == 0xf3);
4383 /* ES */
4384 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
4385 Assert(pCtx->es.u32Limit == 0xffff);
4386 Assert(u32ESAttr == 0xf3);
4387 /* FS */
4388 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
4389 Assert(pCtx->fs.u32Limit == 0xffff);
4390 Assert(u32FSAttr == 0xf3);
4391 /* GS */
4392 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
4393 Assert(pCtx->gs.u32Limit == 0xffff);
4394 Assert(u32GSAttr == 0xf3);
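/* The 0xf3 attribute expected here decodes to a present, DPL 3, read/write accessed data
   segment; together with base = selector << 4 and a 64K limit this matches what the real-on-v86
   code feeds to VT-x. */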
4395 /* 64-bit capable CPUs. */
4396# if HC_ARCH_BITS == 64
4397 Assert(!RT_HI_U32(pCtx->cs.u64Base));
4398 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
4399 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
4400 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
4401# endif
4402 }
4403}
4404#endif /* VBOX_STRICT */
4405
4406
4407/**
4408 * Exports a guest segment register into the guest-state area in the VMCS.
4409 *
4410 * @returns VBox status code.
4411 * @param pVCpu The cross context virtual CPU structure.
4412 * @param idxSel Index of the selector in the VMCS.
4413 * @param idxLimit Index of the segment limit in the VMCS.
4414 * @param idxBase Index of the segment base in the VMCS.
4415 * @param idxAccess Index of the access rights of the segment in the VMCS.
4416 * @param pSelReg Pointer to the segment selector.
4417 *
4418 * @remarks No-long-jump zone!!!
4419 */
4420static int hmR0VmxExportGuestSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
4421 PCCPUMSELREG pSelReg)
4422{
4423 int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); /* 16-bit guest selector field. */
4424 rc |= VMXWriteVmcs32(idxLimit, pSelReg->u32Limit); /* 32-bit guest segment limit field. */
4425 rc |= VMXWriteVmcsGstN(idxBase, pSelReg->u64Base); /* Natural width guest segment base field.*/
4426 AssertRCReturn(rc, rc);
4427
4428 uint32_t u32Access = pSelReg->Attr.u;
4429 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4430 {
4431 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
4432 u32Access = 0xf3;
4433 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
4434 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
4435 }
4436 else
4437 {
4438 /*
4439         * The way to differentiate whether this is really a null selector or just a selector
4440         * loaded with 0 in real-mode is by looking at the segment attributes. A selector
4441         * loaded in real-mode with the value 0 is valid and usable in protected-mode, and we
4442         * should -not- mark it as an unusable segment. Both the recompiler and VT-x ensure
4443         * that null selectors loaded in protected-mode have their attributes set to 0.
4444 */
4445 if (!u32Access)
4446 u32Access = X86DESCATTR_UNUSABLE;
4447 }
4448
4449 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
4450 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
4451 ("Access bit not set for usable segment. idx=%#x sel=%#x attr %#x\n", idxBase, pSelReg, pSelReg->Attr.u));
4452
4453 rc = VMXWriteVmcs32(idxAccess, u32Access); /* 32-bit guest segment access-rights field. */
4454 AssertRCReturn(rc, rc);
4455 return rc;
4456}
4457
4458
4459/**
4460 * Exports the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)
4461 * into the guest-state area in the VMCS.
4462 *
4463 * @returns VBox status code.
4464 * @param pVCpu The cross context virtual CPU structure.
4465 *
4466 * @remarks Will import guest CR0 on strict builds during validation of
4467 * segments.
4468 * @remarks No-long-jump zone!!!
4469 */
4470static int hmR0VmxExportGuestSegmentRegs(PVMCPU pVCpu)
4471{
4472 int rc = VERR_INTERNAL_ERROR_5;
4473 PVM pVM = pVCpu->CTX_SUFF(pVM);
4474 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4475
4476 /*
4477 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
4478 */
4479 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
4480 {
4481#ifdef VBOX_WITH_REM
4482 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
4483 {
4484 Assert(pVM->hm.s.vmx.pRealModeTSS);
4485 AssertCompile(PGMMODE_REAL < PGMMODE_PROTECTED);
4486 if ( pVCpu->hm.s.vmx.fWasInRealMode
4487 && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED)
4488 {
4489 /* Signal that the recompiler must flush its code-cache as the guest -may- rewrite code it will later execute
4490 in real-mode (e.g. OpenBSD 4.0) */
4491 REMFlushTBs(pVM);
4492 Log4Func(("Switch to protected mode detected!\n"));
4493 pVCpu->hm.s.vmx.fWasInRealMode = false;
4494 }
4495 }
4496#endif
4497 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CS)
4498 {
4499 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
4500 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4501 pVCpu->hm.s.vmx.RealMode.AttrCS.u = pCtx->cs.Attr.u;
4502 rc = HMVMX_EXPORT_SREG(CS, &pCtx->cs);
4503 AssertRCReturn(rc, rc);
4504 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CS);
4505 }
4506
4507 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SS)
4508 {
4509 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
4510 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4511 pVCpu->hm.s.vmx.RealMode.AttrSS.u = pCtx->ss.Attr.u;
4512 rc = HMVMX_EXPORT_SREG(SS, &pCtx->ss);
4513 AssertRCReturn(rc, rc);
4514 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SS);
4515 }
4516
4517 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_DS)
4518 {
4519 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
4520 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4521 pVCpu->hm.s.vmx.RealMode.AttrDS.u = pCtx->ds.Attr.u;
4522 rc = HMVMX_EXPORT_SREG(DS, &pCtx->ds);
4523 AssertRCReturn(rc, rc);
4524 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_DS);
4525 }
4526
4527 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_ES)
4528 {
4529 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
4530 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4531 pVCpu->hm.s.vmx.RealMode.AttrES.u = pCtx->es.Attr.u;
4532 rc = HMVMX_EXPORT_SREG(ES, &pCtx->es);
4533 AssertRCReturn(rc, rc);
4534 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_ES);
4535 }
4536
4537 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_FS)
4538 {
4539 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
4540 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4541 pVCpu->hm.s.vmx.RealMode.AttrFS.u = pCtx->fs.Attr.u;
4542 rc = HMVMX_EXPORT_SREG(FS, &pCtx->fs);
4543 AssertRCReturn(rc, rc);
4544 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_FS);
4545 }
4546
4547 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GS)
4548 {
4549 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
4550 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4551 pVCpu->hm.s.vmx.RealMode.AttrGS.u = pCtx->gs.Attr.u;
4552 rc = HMVMX_EXPORT_SREG(GS, &pCtx->gs);
4553 AssertRCReturn(rc, rc);
4554 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GS);
4555 }
4556
4557#ifdef VBOX_STRICT
4558 hmR0VmxValidateSegmentRegs(pVCpu);
4559#endif
4560
4561 Log4Func(("CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pCtx->cs.Sel, pCtx->cs.u64Base,
4562 pCtx->cs.u32Limit, pCtx->cs.Attr.u));
4563 }
4564
4565 /*
4566 * Guest TR.
4567 */
4568 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_TR)
4569 {
4570 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
4571
4572 /*
4573 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
4574 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
4575 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
4576 */
4577 uint16_t u16Sel = 0;
4578 uint32_t u32Limit = 0;
4579 uint64_t u64Base = 0;
4580 uint32_t u32AccessRights = 0;
4581
4582 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4583 {
4584 u16Sel = pCtx->tr.Sel;
4585 u32Limit = pCtx->tr.u32Limit;
4586 u64Base = pCtx->tr.u64Base;
4587 u32AccessRights = pCtx->tr.Attr.u;
4588 }
4589 else
4590 {
4591 Assert(pVM->hm.s.vmx.pRealModeTSS);
4592 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
4593
4594 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
4595 RTGCPHYS GCPhys;
4596 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
4597 AssertRCReturn(rc, rc);
4598
4599 X86DESCATTR DescAttr;
4600 DescAttr.u = 0;
4601 DescAttr.n.u1Present = 1;
4602 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
4603
4604 u16Sel = 0;
4605 u32Limit = HM_VTX_TSS_SIZE;
4606 u64Base = GCPhys; /* in real-mode phys = virt. */
4607 u32AccessRights = DescAttr.u;
4608 }
4609
4610 /* Validate. */
4611 Assert(!(u16Sel & RT_BIT(2)));
4612 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
4613 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
4614 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
4615 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
4616 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
4617 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
4618 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
4619 Assert( (u32Limit & 0xfff) == 0xfff
4620 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
4621 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
4622 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
4623
4624 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_TR_SEL, u16Sel);
4625 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit);
4626 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights);
4627 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base);
4628 AssertRCReturn(rc, rc);
4629
4630 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_TR);
4631 Log4Func(("TR base=%#RX64\n", pCtx->tr.u64Base));
4632 }
4633
4634 /*
4635 * Guest GDTR.
4636 */
4637 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GDTR)
4638 {
4639 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
4640
4641 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt);
4642 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt);
4643 AssertRCReturn(rc, rc);
4644
4645 /* Validate. */
4646 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
4647
4648 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
4649 Log4Func(("GDTR base=%#RX64\n", pCtx->gdtr.pGdt));
4650 }
4651
4652 /*
4653 * Guest LDTR.
4654 */
4655 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_LDTR)
4656 {
4657 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
4658
4659        /* The unusable bit is specific to VT-x; if it's a null selector, mark it as an unusable segment. */
4660 uint32_t u32Access = 0;
4661 if (!pCtx->ldtr.Attr.u)
4662 u32Access = X86DESCATTR_UNUSABLE;
4663 else
4664 u32Access = pCtx->ldtr.Attr.u;
4665
4666 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel);
4667 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit);
4668 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access);
4669 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base);
4670 AssertRCReturn(rc, rc);
4671
4672 /* Validate. */
4673 if (!(u32Access & X86DESCATTR_UNUSABLE))
4674 {
4675 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
4676 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
4677 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
4678 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
4679 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
4680 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
4681 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
4682 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
4683 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
4684 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
4685 }
4686
4687 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
4688 Log4Func(("LDTR base=%#RX64\n", pCtx->ldtr.u64Base));
4689 }
4690
4691 /*
4692 * Guest IDTR.
4693 */
4694 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_IDTR)
4695 {
4696 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
4697
4698 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt);
4699 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt);
4700 AssertRCReturn(rc, rc);
4701
4702 /* Validate. */
4703 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
4704
4705 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
4706 Log4Func(("IDTR base=%#RX64\n", pCtx->idtr.pIdt));
4707 }
4708
4709 return VINF_SUCCESS;
4710}
4711
4712
4713/**
4714 * Exports certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
4715 * areas.
4716 *
4717 * These MSRs will automatically be loaded to the host CPU on every successful
4718 * VM-entry and stored from the host CPU on every successful VM-exit. This also
4719 * creates/updates MSR slots for the host MSRs. The actual host MSR values are
4720 * -not- updated here for performance reasons. See hmR0VmxExportHostMsrs().
4721 *
4722 * Also exports the guest sysenter MSRs into the guest-state area in the VMCS.
4723 *
4724 * @returns VBox status code.
4725 * @param pVCpu The cross context virtual CPU structure.
4726 *
4727 * @remarks No-long-jump zone!!!
4728 */
4729static int hmR0VmxExportGuestMsrs(PVMCPU pVCpu)
4730{
4731 AssertPtr(pVCpu);
4732 AssertPtr(pVCpu->hm.s.vmx.pvGuestMsr);
4733
4734 /*
4735     * MSRs for which we use the auto-load/store MSR area in the VMCS.
4736 * For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs().
4737 */
4738 PVM pVM = pVCpu->CTX_SUFF(pVM);
4739 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4740 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
4741 {
4742 if (pVM->hm.s.fAllow64BitGuests)
4743 {
4744#if HC_ARCH_BITS == 32
4745 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_KERNEL_GS_BASE);
4746
4747 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR, pCtx->msrLSTAR, false, NULL);
4748 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR, pCtx->msrSTAR, false, NULL);
4749 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK, pCtx->msrSFMASK, false, NULL);
4750 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE, false, NULL);
4751 AssertRCReturn(rc, rc);
4752# ifdef LOG_ENABLED
4753 PCVMXAUTOMSR pMsr = (PCVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
4754 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)
4755 Log4Func(("MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", i, pMsr->u32Msr, pMsr->u64Value));
4756# endif
4757#endif
4758 }
4759 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_GUEST_AUTO_MSRS);
4760 }
4761
4762 /*
4763 * Guest Sysenter MSRs.
4764 * These flags are only set when MSR-bitmaps are not supported by the CPU and we cause
4765 * VM-exits on WRMSRs for these MSRs.
4766 */
4767 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_MSR_MASK)
4768 {
4769 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
4770
4771 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
4772 {
4773 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pCtx->SysEnter.cs);
4774 AssertRCReturn(rc, rc);
4775 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_CS_MSR);
4776 }
4777
4778 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
4779 {
4780 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pCtx->SysEnter.eip);
4781 AssertRCReturn(rc, rc);
4782 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
4783 }
4784
4785 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
4786 {
4787 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pCtx->SysEnter.esp);
4788 AssertRCReturn(rc, rc);
4789 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
4790 }
4791 }
4792
4793 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_EFER_MSR)
4794 {
4795 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
4796
4797 if (hmR0VmxShouldSwapEferMsr(pVCpu))
4798 {
4799 /*
4800 * If the CPU supports VMCS controls for swapping EFER, use it. Otherwise, we have no option
4801 * but to use the auto-load store MSR area in the VMCS for swapping EFER. See @bugref{7368}.
4802 */
4803 if (pVM->hm.s.vmx.fSupportsVmcsEfer)
4804 {
4805 int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pCtx->msrEFER);
4806                AssertRCReturn(rc, rc);
4807 Log4Func(("EFER=%#RX64\n", pCtx->msrEFER));
4808 }
4809 else
4810 {
4811 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pCtx->msrEFER, false /* fUpdateHostMsr */,
4812 NULL /* pfAddedAndUpdated */);
4813 AssertRCReturn(rc, rc);
4814
4815 /* We need to intercept reads too, see @bugref{7386#c16}. */
4816 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
4817 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
4818 Log4Func(("MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", MSR_K6_EFER, pCtx->msrEFER,
4819 pVCpu->hm.s.vmx.cMsrs));
4820 }
4821 }
4822 else if (!pVM->hm.s.vmx.fSupportsVmcsEfer)
4823 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K6_EFER);
4824 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_EFER_MSR);
4825 }
4826
4827 return VINF_SUCCESS;
4828}
4829
4830
4831#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
4832/**
4833 * Checks whether the guest state allows safe use of the 32-bit switcher again.
4834 *
4835 * Segment bases and protected mode structures must be 32-bit addressable
4836 * because the 32-bit switcher will ignore the high dword when writing these VMCS
4837 * fields. See @bugref{8432} for details.
4838 *
4839 * @returns true if safe, false if must continue to use the 64-bit switcher.
4840 * @param pCtx Pointer to the guest-CPU context.
4841 *
4842 * @remarks No-long-jump zone!!!
4843 */
4844static bool hmR0VmxIs32BitSwitcherSafe(PCCPUMCTX pCtx)
4845{
4846 if (pCtx->gdtr.pGdt & UINT64_C(0xffffffff00000000)) return false;
4847 if (pCtx->idtr.pIdt & UINT64_C(0xffffffff00000000)) return false;
4848 if (pCtx->ldtr.u64Base & UINT64_C(0xffffffff00000000)) return false;
4849 if (pCtx->tr.u64Base & UINT64_C(0xffffffff00000000)) return false;
4850 if (pCtx->es.u64Base & UINT64_C(0xffffffff00000000)) return false;
4851 if (pCtx->cs.u64Base & UINT64_C(0xffffffff00000000)) return false;
4852 if (pCtx->ss.u64Base & UINT64_C(0xffffffff00000000)) return false;
4853 if (pCtx->ds.u64Base & UINT64_C(0xffffffff00000000)) return false;
4854 if (pCtx->fs.u64Base & UINT64_C(0xffffffff00000000)) return false;
4855 if (pCtx->gs.u64Base & UINT64_C(0xffffffff00000000)) return false;
4856
4857 /* All good, bases are 32-bit. */
4858 return true;
4859}
4860#endif
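
/*
 * Illustrative sketch (not part of the original sources): a compact equivalent of the
 * per-field 32-bit addressability test above, folding all base addresses together
 * before masking the high dword. Shown only to make the intent of the checks explicit;
 * names are local to this sketch.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool SketchAllBelow4G(uint64_t const *pau64Bases, unsigned cBases)
{
    uint64_t uFold = 0;
    for (unsigned i = 0; i < cBases; i++)
        uFold |= pau64Bases[i];
    return !(uFold & UINT64_C(0xffffffff00000000)); /* true if every high dword is clear */
}
#endif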
4861
4862
4863/**
4864 * Selects the appropriate function to run guest code.
4865 *
4866 * @returns VBox status code.
4867 * @param pVCpu The cross context virtual CPU structure.
4868 *
4869 * @remarks No-long-jump zone!!!
4870 */
4871static int hmR0VmxSelectVMRunHandler(PVMCPU pVCpu)
4872{
4873 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4874 if (CPUMIsGuestInLongModeEx(pCtx))
4875 {
4876#ifndef VBOX_ENABLE_64_BITS_GUESTS
4877 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
4878#endif
4879 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
4880#if HC_ARCH_BITS == 32
4881 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
4882 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64)
4883 {
4884#ifdef VBOX_STRICT
4885 if (pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
4886 {
4887            /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
4888 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
4889 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
4890 AssertMsg(fCtxChanged & ( HM_CHANGED_VMX_EXIT_CTLS
4891 | HM_CHANGED_VMX_ENTRY_CTLS
4892 | HM_CHANGED_GUEST_EFER_MSR), ("fCtxChanged=%#RX64\n", fCtxChanged));
4893 }
4894#endif
4895 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
4896
4897            /* Mark that we've switched to the 64-bit handler; we can't safely switch back to 32-bit for
4898 the rest of the VM run (until VM reset). See @bugref{8432#c7}. */
4899 pVCpu->hm.s.vmx.fSwitchedTo64on32 = true;
4900 Log4Func(("Selected 64-bit switcher\n"));
4901 }
4902#else
4903 /* 64-bit host. */
4904 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
4905#endif
4906 }
4907 else
4908 {
4909 /* Guest is not in long mode, use the 32-bit handler. */
4910#if HC_ARCH_BITS == 32
4911 if ( pVCpu->hm.s.vmx.pfnStartVM != VMXR0StartVM32
4912 && !pVCpu->hm.s.vmx.fSwitchedTo64on32 /* If set, guest mode change does not imply switcher change. */
4913 && pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
4914 {
4915# ifdef VBOX_STRICT
4916            /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
4917 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
4918 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
4919 AssertMsg(fCtxChanged & ( HM_CHANGED_VMX_EXIT_CTLS
4920 | HM_CHANGED_VMX_ENTRY_CTLS
4921 | HM_CHANGED_GUEST_EFER_MSR), ("fCtxChanged=%#RX64\n", fCtxChanged));
4922# endif
4923 }
4924# ifdef VBOX_ENABLE_64_BITS_GUESTS
4925 /*
4926 * Keep using the 64-bit switcher even though we're in 32-bit because of bad Intel
4927 * design, see @bugref{8432#c7}. If real-on-v86 mode is active, clear the 64-bit
4928 * switcher flag because now we know the guest is in a sane state where it's safe
4929         * to use the 32-bit switcher. Otherwise check whether the guest state makes it safe to use
4930 * the much faster 32-bit switcher again.
4931 */
4932 if (!pVCpu->hm.s.vmx.fSwitchedTo64on32)
4933 {
4934 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0StartVM32)
4935 Log4Func(("Selected 32-bit switcher\n"));
4936 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4937 }
4938 else
4939 {
4940 Assert(pVCpu->hm.s.vmx.pfnStartVM == VMXR0SwitcherStartVM64);
4941 if ( pVCpu->hm.s.vmx.RealMode.fRealOnV86Active
4942 || hmR0VmxIs32BitSwitcherSafe(pCtx))
4943 {
4944 pVCpu->hm.s.vmx.fSwitchedTo64on32 = false;
4945 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4946 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR
4947 | HM_CHANGED_VMX_ENTRY_CTLS
4948 | HM_CHANGED_VMX_EXIT_CTLS
4949 | HM_CHANGED_HOST_CONTEXT);
4950 Log4Func(("Selected 32-bit switcher (safe)\n"));
4951 }
4952 }
4953# else
4954 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4955# endif
4956#else
4957 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4958#endif
4959 }
4960 Assert(pVCpu->hm.s.vmx.pfnStartVM);
4961 return VINF_SUCCESS;
4962}
4963
4964
4965/**
4966 * Wrapper for running the guest code in VT-x.
4967 *
4968 * @returns VBox status code, no informational status codes.
4969 * @param pVCpu The cross context virtual CPU structure.
4970 *
4971 * @remarks No-long-jump zone!!!
4972 */
4973DECLINLINE(int) hmR0VmxRunGuest(PVMCPU pVCpu)
4974{
4975 /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
4976 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4977 pCtx->fExtrn |= HMVMX_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;
4978
4979 /*
4980 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses
4981 * floating-point operations using SSE instructions. Some XMM registers (XMM6-XMM15) are
4982 * callee-saved and thus the need for this XMM wrapper.
4983 *
4984 * See MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage".
4985 */
4986 bool const fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
4987 /** @todo Add stats for resume vs launch. */
4988 PVM pVM = pVCpu->CTX_SUFF(pVM);
4989#ifdef VBOX_WITH_KERNEL_USING_XMM
4990 int rc = hmR0VMXStartVMWrapXMM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VmcsBatchCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
4991#else
4992 int rc = pVCpu->hm.s.vmx.pfnStartVM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VmcsBatchCache, pVM, pVCpu);
4993#endif
4994 AssertMsg(rc <= VINF_SUCCESS, ("%Rrc\n", rc));
4995 return rc;
4996}
4997
4998
4999/**
5000 * Reports world-switch error and dumps some useful debug info.
5001 *
5002 * @param pVCpu The cross context virtual CPU structure.
5003 * @param rcVMRun The return code from VMLAUNCH/VMRESUME.
5004 * @param pVmxTransient Pointer to the VMX transient structure (only
5005 * exitReason updated).
5006 */
5007static void hmR0VmxReportWorldSwitchError(PVMCPU pVCpu, int rcVMRun, PVMXTRANSIENT pVmxTransient)
5008{
5009 Assert(pVCpu);
5010 Assert(pVmxTransient);
5011 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
5012
5013 Log4Func(("VM-entry failure: %Rrc\n", rcVMRun));
5014 switch (rcVMRun)
5015 {
5016 case VERR_VMX_INVALID_VMXON_PTR:
5017 AssertFailed();
5018 break;
5019 case VINF_SUCCESS: /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
5020 case VERR_VMX_UNABLE_TO_START_VM: /* VMLAUNCH/VMRESUME itself failed. */
5021 {
5022 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason);
5023 rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
5024 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
5025 AssertRC(rc);
5026
5027 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
5028 /* LastError.idCurrentCpu was already updated in hmR0VmxPreRunGuestCommitted().
5029 Cannot do it here as we may have been long preempted. */
5030
5031#ifdef VBOX_STRICT
5032 Log4(("uExitReason %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason,
5033 pVmxTransient->uExitReason));
5034 Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQual));
5035 Log4(("InstrError %#RX32\n", pVCpu->hm.s.vmx.LastError.u32InstrError));
5036 if (pVCpu->hm.s.vmx.LastError.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
5037 Log4(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.LastError.u32InstrError]));
5038 else
5039 Log4(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
5040 Log4(("Entered host CPU %u\n", pVCpu->hm.s.vmx.LastError.idEnteredCpu));
5041 Log4(("Current host CPU %u\n", pVCpu->hm.s.vmx.LastError.idCurrentCpu));
5042
5043 /* VMX control bits. */
5044 uint32_t u32Val;
5045 uint64_t u64Val;
5046 RTHCUINTREG uHCReg;
5047 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val); AssertRC(rc);
5048 Log4(("VMX_VMCS32_CTRL_PIN_EXEC %#RX32\n", u32Val));
5049 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val); AssertRC(rc);
5050 Log4(("VMX_VMCS32_CTRL_PROC_EXEC %#RX32\n", u32Val));
5051 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
5052 {
5053 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val); AssertRC(rc);
5054 Log4(("VMX_VMCS32_CTRL_PROC_EXEC2 %#RX32\n", u32Val));
5055 }
5056 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val); AssertRC(rc);
5057 Log4(("VMX_VMCS32_CTRL_ENTRY %#RX32\n", u32Val));
5058 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val); AssertRC(rc);
5059 Log4(("VMX_VMCS32_CTRL_EXIT %#RX32\n", u32Val));
5060 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val); AssertRC(rc);
5061 Log4(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
5062 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32Val); AssertRC(rc);
5063 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
5064 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &u32Val); AssertRC(rc);
5065 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
5066 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &u32Val); AssertRC(rc);
5067 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
5068 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, &u32Val); AssertRC(rc);
5069 Log4(("VMX_VMCS32_CTRL_TPR_THRESHOLD %u\n", u32Val));
5070 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &u32Val); AssertRC(rc);
5071 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
5072 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
5073 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
5074 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
5075 Log4(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
5076 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val); AssertRC(rc);
5077 Log4(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
5078 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, &u32Val); AssertRC(rc);
5079 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
5080 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, &u32Val); AssertRC(rc);
5081 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
5082 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
5083 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
5084 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
5085 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
5086 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
5087 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
5088 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
5089 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
5090 if (pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
5091 {
5092 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
5093 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
5094 }
5095
5096 /* Guest bits. */
5097 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val); AssertRC(rc);
5098 Log4(("Old Guest Rip %#RX64 New %#RX64\n", pVCpu->cpum.GstCtx.rip, u64Val));
5099 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val); AssertRC(rc);
5100 Log4(("Old Guest Rsp %#RX64 New %#RX64\n", pVCpu->cpum.GstCtx.rsp, u64Val));
5101 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); AssertRC(rc);
5102 Log4(("Old Guest Rflags %#RX32 New %#RX32\n", pVCpu->cpum.GstCtx.eflags.u32, u32Val));
5103 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid)
5104 {
5105 rc = VMXReadVmcs32(VMX_VMCS16_VPID, &u32Val); AssertRC(rc);
5106 Log4(("VMX_VMCS16_VPID %u\n", u32Val));
5107 }
5108
5109 /* Host bits. */
5110 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR0, &uHCReg); AssertRC(rc);
5111 Log4(("Host CR0 %#RHr\n", uHCReg));
5112 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR3, &uHCReg); AssertRC(rc);
5113 Log4(("Host CR3 %#RHr\n", uHCReg));
5114 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR4, &uHCReg); AssertRC(rc);
5115 Log4(("Host CR4 %#RHr\n", uHCReg));
5116
5117 RTGDTR HostGdtr;
5118 PCX86DESCHC pDesc;
5119 ASMGetGDTR(&HostGdtr);
5120 rc = VMXReadVmcs32(VMX_VMCS16_HOST_CS_SEL, &u32Val); AssertRC(rc);
5121 Log4(("Host CS %#08x\n", u32Val));
5122 if (u32Val < HostGdtr.cbGdt)
5123 {
5124 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5125 hmR0DumpDescriptor(pDesc, u32Val, "CS: ");
5126 }
5127
5128 rc = VMXReadVmcs32(VMX_VMCS16_HOST_DS_SEL, &u32Val); AssertRC(rc);
5129 Log4(("Host DS %#08x\n", u32Val));
5130 if (u32Val < HostGdtr.cbGdt)
5131 {
5132 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5133 hmR0DumpDescriptor(pDesc, u32Val, "DS: ");
5134 }
5135
5136 rc = VMXReadVmcs32(VMX_VMCS16_HOST_ES_SEL, &u32Val); AssertRC(rc);
5137 Log4(("Host ES %#08x\n", u32Val));
5138 if (u32Val < HostGdtr.cbGdt)
5139 {
5140 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5141 hmR0DumpDescriptor(pDesc, u32Val, "ES: ");
5142 }
5143
5144 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FS_SEL, &u32Val); AssertRC(rc);
5145 Log4(("Host FS %#08x\n", u32Val));
5146 if (u32Val < HostGdtr.cbGdt)
5147 {
5148 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5149 hmR0DumpDescriptor(pDesc, u32Val, "FS: ");
5150 }
5151
5152 rc = VMXReadVmcs32(VMX_VMCS16_HOST_GS_SEL, &u32Val); AssertRC(rc);
5153 Log4(("Host GS %#08x\n", u32Val));
5154 if (u32Val < HostGdtr.cbGdt)
5155 {
5156 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5157 hmR0DumpDescriptor(pDesc, u32Val, "GS: ");
5158 }
5159
5160 rc = VMXReadVmcs32(VMX_VMCS16_HOST_SS_SEL, &u32Val); AssertRC(rc);
5161 Log4(("Host SS %#08x\n", u32Val));
5162 if (u32Val < HostGdtr.cbGdt)
5163 {
5164 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5165 hmR0DumpDescriptor(pDesc, u32Val, "SS: ");
5166 }
5167
5168 rc = VMXReadVmcs32(VMX_VMCS16_HOST_TR_SEL, &u32Val); AssertRC(rc);
5169 Log4(("Host TR %#08x\n", u32Val));
5170 if (u32Val < HostGdtr.cbGdt)
5171 {
5172 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5173 hmR0DumpDescriptor(pDesc, u32Val, "TR: ");
5174 }
5175
5176 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_TR_BASE, &uHCReg); AssertRC(rc);
5177 Log4(("Host TR Base %#RHv\n", uHCReg));
5178 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, &uHCReg); AssertRC(rc);
5179 Log4(("Host GDTR Base %#RHv\n", uHCReg));
5180 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, &uHCReg); AssertRC(rc);
5181 Log4(("Host IDTR Base %#RHv\n", uHCReg));
5182 rc = VMXReadVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, &u32Val); AssertRC(rc);
5183 Log4(("Host SYSENTER CS %#08x\n", u32Val));
5184 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, &uHCReg); AssertRC(rc);
5185 Log4(("Host SYSENTER EIP %#RHv\n", uHCReg));
5186 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, &uHCReg); AssertRC(rc);
5187 Log4(("Host SYSENTER ESP %#RHv\n", uHCReg));
5188 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RSP, &uHCReg); AssertRC(rc);
5189 Log4(("Host RSP %#RHv\n", uHCReg));
5190 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg); AssertRC(rc);
5191 Log4(("Host RIP %#RHv\n", uHCReg));
5192# if HC_ARCH_BITS == 64
5193 Log4(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
5194 Log4(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
5195 Log4(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
5196 Log4(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
5197 Log4(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
5198 Log4(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
5199# endif
5200#endif /* VBOX_STRICT */
5201 break;
5202 }
5203
5204 default:
5205 /* Impossible */
5206 AssertMsgFailed(("hmR0VmxReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
5207 break;
5208 }
5209}
5210
5211
5212#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
5213#ifndef VMX_USE_CACHED_VMCS_ACCESSES
5214# error "VMX_USE_CACHED_VMCS_ACCESSES not defined when it should be!"
5215#endif
5216#ifdef VBOX_STRICT
5217static bool hmR0VmxIsValidWriteField(uint32_t idxField)
5218{
5219 switch (idxField)
5220 {
5221 case VMX_VMCS_GUEST_RIP:
5222 case VMX_VMCS_GUEST_RSP:
5223 case VMX_VMCS_GUEST_SYSENTER_EIP:
5224 case VMX_VMCS_GUEST_SYSENTER_ESP:
5225 case VMX_VMCS_GUEST_GDTR_BASE:
5226 case VMX_VMCS_GUEST_IDTR_BASE:
5227 case VMX_VMCS_GUEST_CS_BASE:
5228 case VMX_VMCS_GUEST_DS_BASE:
5229 case VMX_VMCS_GUEST_ES_BASE:
5230 case VMX_VMCS_GUEST_FS_BASE:
5231 case VMX_VMCS_GUEST_GS_BASE:
5232 case VMX_VMCS_GUEST_SS_BASE:
5233 case VMX_VMCS_GUEST_LDTR_BASE:
5234 case VMX_VMCS_GUEST_TR_BASE:
5235 case VMX_VMCS_GUEST_CR3:
5236 return true;
5237 }
5238 return false;
5239}
5240
5241static bool hmR0VmxIsValidReadField(uint32_t idxField)
5242{
5243 switch (idxField)
5244 {
5245 /* Read-only fields. */
5246 case VMX_VMCS_RO_EXIT_QUALIFICATION:
5247 return true;
5248 }
5249 /* Remaining readable fields should also be writable. */
5250 return hmR0VmxIsValidWriteField(idxField);
5251}
5252#endif /* VBOX_STRICT */
5253
5254
5255/**
5256 * Executes the specified handler in 64-bit mode.
5257 *
5258 * @returns VBox status code (no informational status codes).
5259 * @param pVCpu The cross context virtual CPU structure.
5260 * @param enmOp The operation to perform.
5261 * @param cParams Number of parameters.
5262 * @param paParam Array of 32-bit parameters.
5263 */
5264VMMR0DECL(int) VMXR0Execute64BitsHandler(PVMCPU pVCpu, HM64ON32OP enmOp, uint32_t cParams, uint32_t *paParam)
5265{
5266 PVM pVM = pVCpu->CTX_SUFF(pVM);
5267 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
5268 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
5269 Assert(pVCpu->hm.s.vmx.VmcsBatchCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VmcsBatchCache.Write.aField));
5270 Assert(pVCpu->hm.s.vmx.VmcsBatchCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VmcsBatchCache.Read.aField));
5271
5272#ifdef VBOX_STRICT
5273 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VmcsBatchCache.Write.cValidEntries; i++)
5274 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VmcsBatchCache.Write.aField[i]));
5275
5276    for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VmcsBatchCache.Read.cValidEntries; i++)
5277 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VmcsBatchCache.Read.aField[i]));
5278#endif
5279
5280 /* Disable interrupts. */
5281 RTCCUINTREG fOldEFlags = ASMIntDisableFlags();
5282
5283#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
5284 RTCPUID idHostCpu = RTMpCpuId();
5285 CPUMR0SetLApic(pVCpu, idHostCpu);
5286#endif
5287
5288 PCHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();
5289 RTHCPHYS HCPhysCpuPage = pHostCpu->HCPhysMemObj;
5290
5291 /* Clear VMCS. Marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */
5292 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
5293 pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_CLEAR;
5294
5295 /* Leave VMX Root Mode. */
5296 VMXDisable();
5297
5298 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
5299
5300 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
5301 CPUMSetHyperEIP(pVCpu, enmOp);
5302 for (int i = (int)cParams - 1; i >= 0; i--)
5303 CPUMPushHyper(pVCpu, paParam[i]);
5304
5305 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
5306
5307 /* Call the switcher. */
5308 int rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_UOFFSETOF_DYN(VM, aCpus[pVCpu->idCpu].cpum) - RT_UOFFSETOF(VM, cpum));
5309 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
5310
5311 /** @todo replace with hmR0VmxEnterRootMode() and hmR0VmxLeaveRootMode(). */
5312 /* Make sure the VMX instructions don't cause #UD faults. */
5313 SUPR0ChangeCR4(X86_CR4_VMXE, RTCCUINTREG_MAX);
5314
5315 /* Re-enter VMX Root Mode */
5316 int rc2 = VMXEnable(HCPhysCpuPage);
5317 if (RT_FAILURE(rc2))
5318 {
5319 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
5320 ASMSetFlags(fOldEFlags);
5321 pVM->hm.s.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
5322 return rc2;
5323 }
5324
5325 rc2 = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
5326 AssertRC(rc2);
5327 pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_ACTIVE;
5328 Assert(!(ASMGetFlags() & X86_EFL_IF));
5329 ASMSetFlags(fOldEFlags);
5330 return rc;
5331}
5332
5333
5334/**
5335 * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts
5336 * supporting 64-bit guests.
5337 *
5338 * @returns VBox status code.
5339 * @param fResume Whether to VMLAUNCH or VMRESUME.
5340 * @param pCtx Pointer to the guest-CPU context.
5341 * @param pCache Pointer to the VMCS cache.
5342 * @param pVM The cross context VM structure.
5343 * @param pVCpu The cross context virtual CPU structure.
5344 */
5345DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSBATCHCACHE pCache, PVM pVM, PVMCPU pVCpu)
5346{
5347 NOREF(fResume);
5348
5349 PCHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();
5350 RTHCPHYS const HCPhysCpuPage = pHostCpu->HCPhysMemObj;
5351
5352#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5353 pCache->uPos = 1;
5354 pCache->interPD = PGMGetInterPaeCR3(pVM);
5355 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
5356#endif
5357
5358#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
5359 pCache->TestIn.HCPhysCpuPage = 0;
5360 pCache->TestIn.HCPhysVmcs = 0;
5361 pCache->TestIn.pCache = 0;
5362 pCache->TestOut.HCPhysVmcs = 0;
5363 pCache->TestOut.pCache = 0;
5364 pCache->TestOut.pCtx = 0;
5365 pCache->TestOut.eflags = 0;
5366#else
5367 NOREF(pCache);
5368#endif
5369
5370 uint32_t aParam[10];
5371 aParam[0] = RT_LO_U32(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */
5372 aParam[1] = RT_HI_U32(HCPhysCpuPage); /* Param 1: VMXON physical address - Hi. */
5373 aParam[2] = RT_LO_U32(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */
5374 aParam[3] = RT_HI_U32(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Hi. */
5375 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VmcsBatchCache);
5376 aParam[5] = 0;
5377 aParam[6] = VM_RC_ADDR(pVM, pVM);
5378 aParam[7] = 0;
5379 aParam[8] = VM_RC_ADDR(pVM, pVCpu);
5380 aParam[9] = 0;
5381
5382#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5383 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
5384 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
5385#endif
5386 int rc = VMXR0Execute64BitsHandler(pVCpu, HM64ON32OP_VMXRCStartVM64, RT_ELEMENTS(aParam), &aParam[0]);
5387
5388#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5389 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
5390 Assert(pCtx->dr[4] == 10);
5391 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
5392#endif
5393
5394#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
5395 AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
5396 AssertMsg(pCache->TestIn.HCPhysVmcs == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
5397 pVCpu->hm.s.vmx.HCPhysVmcs));
5398 AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
5399 pCache->TestOut.HCPhysVmcs));
5400 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
5401 pCache->TestOut.pCache));
5402 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VmcsBatchCache),
5403 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VmcsBatchCache)));
5404 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
5405 pCache->TestOut.pCtx));
5406 Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
5407#endif
5408 NOREF(pCtx);
5409 return rc;
5410}
5411
5412
5413/**
5414 * Initialize the VMCS-Read cache.
5415 *
5416 * The VMCS cache is used for 32-bit hosts running 64-bit guests (except 32-bit
5417 * Darwin which runs with 64-bit paging in 32-bit mode) for 64-bit fields that
5418 * cannot be accessed in 32-bit mode. Some 64-bit fields -can- be accessed
5419 * (those that have a 32-bit FULL & HIGH part).
5420 *
5421 * @returns VBox status code.
5422 * @param pVCpu The cross context virtual CPU structure.
5423 */
5424static int hmR0VmxInitVmcsReadCache(PVMCPU pVCpu)
5425{
5426#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField) \
5427 do { \
5428 Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
5429 pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
5430 pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
5431 ++cReadFields; \
5432 } while (0)
5433
5434 PVMXVMCSBATCHCACHE pCache = &pVCpu->hm.s.vmx.VmcsBatchCache;
5435 uint32_t cReadFields = 0;
5436
5437 /*
5438 * Don't remove the #if 0'd fields in this code. They're listed here for consistency
5439 * and serve to indicate exceptions to the rules.
5440 */
5441
5442 /* Guest-natural selector base fields. */
5443#if 0
5444 /* These are 32-bit in practice. See Intel spec. 2.5 "Control Registers". */
5445 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR0);
5446 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR4);
5447#endif
5448 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_ES_BASE);
5449 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CS_BASE);
5450 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SS_BASE);
5451 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DS_BASE);
5452 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_FS_BASE);
5453 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GS_BASE);
5454 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_LDTR_BASE);
5455 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_TR_BASE);
5456 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GDTR_BASE);
5457 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_IDTR_BASE);
5458 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RSP);
5459 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RIP);
5460#if 0
5461 /* Unused natural width guest-state fields. */
5462 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS);
5463 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in Nested Paging case */
5464#endif
5465 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_ESP);
5466 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP);
5467
5468 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for
5469 these 64-bit fields (using "FULL" and "HIGH" fields). */
5470#if 0
5471 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL);
5472 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_DEBUGCTL_FULL);
5473 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PAT_FULL);
5474 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_EFER_FULL);
5475 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL);
5476 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE0_FULL);
5477 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE1_FULL);
5478 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE2_FULL);
5479 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE3_FULL);
5480#endif
5481
5482    /* Natural-width read-only data fields. */
5483 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_QUALIFICATION);
5484 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_GUEST_LINEAR_ADDR);
5485
5486 if (pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
5487 {
5488 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3);
5489 AssertMsg(cReadFields == VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields,
5490 VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX));
5491 pCache->Read.cValidEntries = VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX;
5492 }
5493 else
5494 {
5495 AssertMsg(cReadFields == VMX_VMCS_MAX_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields, VMX_VMCS_MAX_CACHE_IDX));
5496 pCache->Read.cValidEntries = VMX_VMCS_MAX_CACHE_IDX;
5497 }
5498
5499#undef VMXLOCAL_INIT_READ_CACHE_FIELD
5500 return VINF_SUCCESS;
5501}
5502
5503
5504/**
5505 * Writes a field into the VMCS. This can either directly invoke a VMWRITE or
5506 * queue up the VMWRITE by using the VMCS write cache (on 32-bit hosts, except
5507 * darwin, running 64-bit guests).
5508 *
5509 * @returns VBox status code.
5510 * @param pVCpu The cross context virtual CPU structure.
5511 * @param idxField The VMCS field encoding.
5512 * @param u64Val 16, 32 or 64-bit value.
5513 */
5514VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5515{
5516 int rc;
5517 switch (idxField)
5518 {
5519 /*
5520         * These fields consist of a "FULL" and a "HIGH" part which can be written to individually.
5521 */
5522 /* 64-bit Control fields. */
5523 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
5524 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
5525 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
5526 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
5527 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
5528 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
5529 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
5530 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
5531 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
5532 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
5533 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
5534 case VMX_VMCS64_CTRL_EPTP_FULL:
5535 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
5536 /* 64-bit Guest-state fields. */
5537 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
5538 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
5539 case VMX_VMCS64_GUEST_PAT_FULL:
5540 case VMX_VMCS64_GUEST_EFER_FULL:
5541 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
5542 case VMX_VMCS64_GUEST_PDPTE0_FULL:
5543 case VMX_VMCS64_GUEST_PDPTE1_FULL:
5544 case VMX_VMCS64_GUEST_PDPTE2_FULL:
5545 case VMX_VMCS64_GUEST_PDPTE3_FULL:
5546 /* 64-bit Host-state fields. */
5547 case VMX_VMCS64_HOST_PAT_FULL:
5548 case VMX_VMCS64_HOST_EFER_FULL:
5549 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
5550 {
5551 rc = VMXWriteVmcs32(idxField, RT_LO_U32(u64Val));
5552 rc |= VMXWriteVmcs32(idxField + 1, RT_HI_U32(u64Val));
5553 break;
5554 }
5555
5556 /*
5557 * These fields do not have high and low parts. Queue up the VMWRITE by using the VMCS write-cache (for 64-bit
5558 * values). When we switch the host to 64-bit mode for running 64-bit guests, these VMWRITEs get executed then.
5559 */
5560 /* Natural-width Guest-state fields. */
5561 case VMX_VMCS_GUEST_CR3:
5562 case VMX_VMCS_GUEST_ES_BASE:
5563 case VMX_VMCS_GUEST_CS_BASE:
5564 case VMX_VMCS_GUEST_SS_BASE:
5565 case VMX_VMCS_GUEST_DS_BASE:
5566 case VMX_VMCS_GUEST_FS_BASE:
5567 case VMX_VMCS_GUEST_GS_BASE:
5568 case VMX_VMCS_GUEST_LDTR_BASE:
5569 case VMX_VMCS_GUEST_TR_BASE:
5570 case VMX_VMCS_GUEST_GDTR_BASE:
5571 case VMX_VMCS_GUEST_IDTR_BASE:
5572 case VMX_VMCS_GUEST_RSP:
5573 case VMX_VMCS_GUEST_RIP:
5574 case VMX_VMCS_GUEST_SYSENTER_ESP:
5575 case VMX_VMCS_GUEST_SYSENTER_EIP:
5576 {
5577 if (!(RT_HI_U32(u64Val)))
5578 {
5579 /* If this field is 64-bit, VT-x will zero out the top bits. */
5580 rc = VMXWriteVmcs32(idxField, RT_LO_U32(u64Val));
5581 }
5582 else
5583 {
5584 /* Assert that only the 32->64 switcher case should ever come here. */
5585 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);
5586 rc = VMXWriteCachedVmcsEx(pVCpu, idxField, u64Val);
5587 }
5588 break;
5589 }
5590
5591 default:
5592 {
5593 AssertMsgFailed(("VMXWriteVmcs64Ex: Invalid field %#RX32 (pVCpu=%p u64Val=%#RX64)\n", idxField, pVCpu, u64Val));
5594 rc = VERR_INVALID_PARAMETER;
5595 break;
5596 }
5597 }
5598 AssertRCReturn(rc, rc);
5599 return rc;
5600}
5601
5602
5603/**
5604 * Queue up a VMWRITE by using the VMCS write cache.
5605 * This is only used on 32-bit hosts (except darwin) for 64-bit guests.
5606 *
5607 * @param pVCpu The cross context virtual CPU structure.
5608 * @param idxField The VMCS field encoding.
5609 * @param u64Val 16, 32 or 64-bit value.
5610 */
5611VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5612{
5613 AssertPtr(pVCpu);
5614 PVMXVMCSBATCHCACHE pCache = &pVCpu->hm.s.vmx.VmcsBatchCache;
5615
5616 AssertMsgReturn(pCache->Write.cValidEntries < VMX_VMCS_BATCH_CACHE_MAX_ENTRY - 1,
5617 ("entries=%u\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
5618
5619 /* Make sure there are no duplicates. */
5620 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
5621 {
5622 if (pCache->Write.aField[i] == idxField)
5623 {
5624 pCache->Write.aFieldVal[i] = u64Val;
5625 return VINF_SUCCESS;
5626 }
5627 }
5628
5629 pCache->Write.aField[pCache->Write.cValidEntries] = idxField;
5630 pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
5631 pCache->Write.cValidEntries++;
5632 return VINF_SUCCESS;
5633}
5634#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) */
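
/*
 * Illustrative sketch (not part of the original sources) of the write-cache behaviour
 * implemented above: a fixed-size array of (field, value) pairs where updating an
 * already queued field overwrites its value instead of appending a duplicate entry.
 * Sizes and names are local to this sketch.
 */
#if 0
#include <stdint.h>

#define SKETCH_CACHE_MAX 32

typedef struct SKETCHWRITECACHE
{
    uint32_t cValid;
    uint32_t aField[SKETCH_CACHE_MAX];
    uint64_t aValue[SKETCH_CACHE_MAX];
} SKETCHWRITECACHE;

static int SketchCacheWrite(SKETCHWRITECACHE *pCache, uint32_t idxField, uint64_t u64Val)
{
    for (uint32_t i = 0; i < pCache->cValid; i++)
        if (pCache->aField[i] == idxField)
        {
            pCache->aValue[i] = u64Val;  /* overwrite the queued value, no duplicates */
            return 0;
        }
    if (pCache->cValid >= SKETCH_CACHE_MAX)
        return -1;                       /* cache full */
    pCache->aField[pCache->cValid] = idxField;
    pCache->aValue[pCache->cValid] = u64Val;
    pCache->cValid++;
    return 0;
}
#endif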
5635
5636
5637/**
5638 * Sets up the usage of TSC-offsetting and updates the VMCS.
5639 *
5640 * If offsetting is not possible, RDTSC(P) VM-exits are enabled instead. Also sets up
5641 * the VMX preemption timer.
5642 *
5644 * @param pVCpu The cross context virtual CPU structure.
5645 *
5646 * @remarks No-long-jump zone!!!
5647 */
5648static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu)
5649{
5650 bool fOffsettedTsc;
5651 bool fParavirtTsc;
5652 PVM pVM = pVCpu->CTX_SUFF(pVM);
5653 uint64_t uTscOffset;
5654 if (pVM->hm.s.vmx.fUsePreemptTimer)
5655 {
5656 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &uTscOffset, &fOffsettedTsc, &fParavirtTsc);
5657
5658 /* Make sure the returned values have sane upper and lower boundaries. */
5659 uint64_t u64CpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
5660 cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64th of a second */
5661 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
5662 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
5663
5664 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
5665 int rc = VMXWriteVmcs32(VMX_VMCS32_PREEMPT_TIMER_VALUE, cPreemptionTickCount);
5666 AssertRC(rc);
5667 }
5668 else
5669 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
5670
5671 if (fParavirtTsc)
5672 {
5673        /* Currently neither Hyper-V nor KVM needs to update their paravirt. TSC
5674           information before every VM-entry, hence it is disabled here for performance reasons. */
5675#if 0
5676 int rc = GIMR0UpdateParavirtTsc(pVM, 0 /* u64Offset */);
5677 AssertRC(rc);
5678#endif
5679 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
5680 }
5681
5682 uint32_t uProcCtls = pVCpu->hm.s.vmx.Ctls.u32ProcCtls;
5683 if ( fOffsettedTsc
5684 && RT_LIKELY(!pVCpu->hm.s.fDebugWantRdTscExit))
5685 {
5686 if (pVCpu->hm.s.vmx.Ctls.u64TscOffset != uTscOffset)
5687 {
5688 int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, uTscOffset);
5689 AssertRC(rc);
5690 pVCpu->hm.s.vmx.Ctls.u64TscOffset = uTscOffset;
5691 }
5692
5693 if (uProcCtls & VMX_PROC_CTLS_RDTSC_EXIT)
5694 {
5695 uProcCtls &= ~VMX_PROC_CTLS_RDTSC_EXIT;
5696 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
5697 AssertRC(rc);
5698 pVCpu->hm.s.vmx.Ctls.u32ProcCtls = uProcCtls;
5699 }
5700 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
5701 }
5702 else
5703 {
5704 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
5705 if (!(uProcCtls & VMX_PROC_CTLS_RDTSC_EXIT))
5706 {
5707 uProcCtls |= VMX_PROC_CTLS_RDTSC_EXIT;
5708 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
5709 AssertRC(rc);
5710 pVCpu->hm.s.vmx.Ctls.u32ProcCtls = uProcCtls;
5711 }
5712 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
5713 }
5714}
5715
5716
5717/**
5718 * Gets the IEM exception flags for the specified vector and IDT vectoring /
5719 * VM-exit interruption info type.
5720 *
5721 * @returns The IEM exception flags.
5722 * @param uVector The event vector.
5723 * @param uVmxVectorType The VMX event type.
5724 *
5725 * @remarks This function currently only constructs flags required for
5726 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
5727 * and CR2 aspects of an exception are not included).
5728 */
5729static uint32_t hmR0VmxGetIemXcptFlags(uint8_t uVector, uint32_t uVmxVectorType)
5730{
5731 uint32_t fIemXcptFlags;
5732 switch (uVmxVectorType)
5733 {
5734 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
5735 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
5736 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
5737 break;
5738
5739 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
5740 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
5741 break;
5742
5743 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
5744 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
5745 break;
5746
5747 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
5748 {
5749 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
5750 if (uVector == X86_XCPT_BP)
5751 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
5752 else if (uVector == X86_XCPT_OF)
5753 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
5754 else
5755 {
5756 fIemXcptFlags = 0;
5757 AssertMsgFailed(("Unexpected vector for software int. uVector=%#x", uVector));
5758 }
5759 break;
5760 }
5761
5762 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
5763 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
5764 break;
5765
5766 default:
5767 fIemXcptFlags = 0;
5768 AssertMsgFailed(("Unexpected vector type! uVmxVectorType=%#x uVector=%#x", uVmxVectorType, uVector));
5769 break;
5770 }
5771 return fIemXcptFlags;
5772}
5773
5774
5775/**
5776 * Sets an event as a pending event to be injected into the guest.
5777 *
5778 * @param pVCpu The cross context virtual CPU structure.
5779 * @param u32IntInfo The VM-entry interruption-information field.
5780 * @param cbInstr The VM-entry instruction length in bytes (for software
5781 * interrupts, exceptions and privileged software
5782 * exceptions).
5783 * @param u32ErrCode The VM-entry exception error code.
5784 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
5785 * page-fault.
5786 *
5787 * @remarks Statistics counter assumes this is a guest event being injected or
5788 * re-injected into the guest, i.e. 'StatInjectPendingReflect' is
5789 * always incremented.
5790 */
5791DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
5792 RTGCUINTPTR GCPtrFaultAddress)
5793{
5794 Assert(!pVCpu->hm.s.Event.fPending);
5795 pVCpu->hm.s.Event.fPending = true;
5796 pVCpu->hm.s.Event.u64IntInfo = u32IntInfo;
5797 pVCpu->hm.s.Event.u32ErrCode = u32ErrCode;
5798 pVCpu->hm.s.Event.cbInstr = cbInstr;
5799 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
5800}
5801
5802
5803/**
5804 * Sets an external interrupt as pending-for-injection into the VM.
5805 *
5806 * @param pVCpu The cross context virtual CPU structure.
5807 * @param u8Interrupt The external interrupt vector.
5808 */
5809DECLINLINE(void) hmR0VmxSetPendingExtInt(PVMCPU pVCpu, uint8_t u8Interrupt)
5810{
5811 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
5812 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5813 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
5814 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
5815 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
5816}
5817
5818
5819/**
5820 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
5821 *
5822 * @param pVCpu The cross context virtual CPU structure.
5823 */
5824DECLINLINE(void) hmR0VmxSetPendingXcptNmi(PVMCPU pVCpu)
5825{
5826 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
5827 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
5828 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
5829 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
5830 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
5831}
5832
5833
5834/**
5835 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
5836 *
5837 * @param pVCpu The cross context virtual CPU structure.
5838 */
5839DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu)
5840{
5841 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
5842                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
5843 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
5844 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
5845 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
5846}
5847
5848
5849/**
5850 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
5851 *
5852 * @param pVCpu The cross context virtual CPU structure.
5853 */
5854DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu)
5855{
5856 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
5857                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
5858 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
5859 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
5860 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
5861}
5862
5863
5864/**
5865 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
5866 *
5867 * @param pVCpu The cross context virtual CPU structure.
5868 */
5869DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu)
5870{
5871 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
5872                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
5873 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
5874 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
5875 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
5876}
5877
5878
5879#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5880/**
5881 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
5882 *
5883 * @param pVCpu The cross context virtual CPU structure.
5884 * @param u32ErrCode The error code for the general-protection exception.
5885 */
5886DECLINLINE(void) hmR0VmxSetPendingXcptGP(PVMCPU pVCpu, uint32_t u32ErrCode)
5887{
5888 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
5889                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
5890 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
5891 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
5892 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
5893}
5894
5895
5896/**
5897 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
5898 *
5899 * @param pVCpu The cross context virtual CPU structure.
5900 * @param u32ErrCode The error code for the stack exception.
5901 */
5902DECLINLINE(void) hmR0VmxSetPendingXcptSS(PVMCPU pVCpu, uint32_t u32ErrCode)
5903{
5904 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
5905                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
5906 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
5907 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
5908 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
5909}
5910
5911
5912/**
5913 * Decodes the memory operand of an instruction that caused a VM-exit.
5914 *
5915 * The VM-exit qualification field provides the displacement field for memory
5916 * operand instructions, if any.
5917 *
5918 * @returns Strict VBox status code (i.e. informational status codes too).
5919 * @retval VINF_SUCCESS if the operand was successfully decoded.
5920 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
5921 * operand.
5922 * @param pVCpu The cross context virtual CPU structure.
5923 * @param uExitInstrInfo The VM-exit instruction information field.
5924 * @param enmMemAccess The memory operand's access type (read or write).
5925 * @param GCPtrDisp The instruction displacement field, if any. For
5926 * RIP-relative addressing pass RIP + displacement here.
5927 * @param pGCPtrMem Where to store the effective destination memory address.
5928 */
5929static VBOXSTRICTRC hmR0VmxDecodeMemOperand(PVMCPU pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
5930 PRTGCPTR pGCPtrMem)
5931{
5932 Assert(pGCPtrMem);
5933 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
5934 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
5935 | CPUMCTX_EXTRN_CR0);
5936
5937 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
5938 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
5939 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
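    /* The instruction-info address-size field encodes 0=16-bit, 1=32-bit and 2=64-bit,
       which is why the tables above can be indexed directly by uAddrSize. */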
5940
5941 VMXEXITINSTRINFO ExitInstrInfo;
5942 ExitInstrInfo.u = uExitInstrInfo;
5943 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
5944 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
5945 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
5946 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
5947 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
5948 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
5949 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
5950 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
5951 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
5952
5953 /*
5954 * Validate instruction information.
5955     * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
5956 */
5957 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
5958 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
5959 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
5960 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
5961 AssertLogRelMsgReturn(fIsMemOperand,
5962 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
5963
5964 /*
5965 * Compute the complete effective address.
5966 *
5967 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
5968 * See AMD spec. 4.5.2 "Segment Registers".
5969 */
5970 RTGCPTR GCPtrMem = GCPtrDisp;
5971 if (fBaseRegValid)
5972 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
5973 if (fIdxRegValid)
5974 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
5975
5976 RTGCPTR const GCPtrOff = GCPtrMem;
5977 if ( !fIsLongMode
5978 || iSegReg >= X86_SREG_FS)
5979 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
5980 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
5981
5982 /*
5983 * Validate effective address.
5984 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
5985 */
5986 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
5987 Assert(cbAccess > 0);
5988 if (fIsLongMode)
5989 {
5990 if (X86_IS_CANONICAL(GCPtrMem))
5991 {
5992 *pGCPtrMem = GCPtrMem;
5993 return VINF_SUCCESS;
5994 }
5995
5996 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
5997 * "Data Limit Checks in 64-bit Mode". */
5998 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
5999 hmR0VmxSetPendingXcptGP(pVCpu, 0);
6000 return VINF_HM_PENDING_XCPT;
6001 }
6002
6003 /*
6004 * This is a watered down version of iemMemApplySegment().
6005 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6006 * and segment CPL/DPL checks are skipped.
6007 */
6008 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6009 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6010 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6011
6012 /* Check if the segment is present and usable. */
6013 if ( pSel->Attr.n.u1Present
6014 && !pSel->Attr.n.u1Unusable)
6015 {
6016 Assert(pSel->Attr.n.u1DescType);
6017 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6018 {
6019 /* Check permissions for the data segment. */
6020 if ( enmMemAccess == VMXMEMACCESS_WRITE
6021 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6022 {
6023 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6024 hmR0VmxSetPendingXcptGP(pVCpu, iSegReg);
6025 return VINF_HM_PENDING_XCPT;
6026 }
6027
6028 /* Check limits if it's a normal data segment. */
6029 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6030 {
6031 if ( GCPtrFirst32 > pSel->u32Limit
6032 || GCPtrLast32 > pSel->u32Limit)
6033 {
6034                        Log4Func(("Data segment limit exceeded. "
6035 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6036 GCPtrLast32, pSel->u32Limit));
6037 if (iSegReg == X86_SREG_SS)
6038 hmR0VmxSetPendingXcptSS(pVCpu, 0);
6039 else
6040 hmR0VmxSetPendingXcptGP(pVCpu, 0);
6041 return VINF_HM_PENDING_XCPT;
6042 }
6043 }
6044 else
6045 {
6046 /* Check limits if it's an expand-down data segment.
6047 Note! The upper boundary is defined by the B bit, not the G bit! */
6048 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6049 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6050 {
6051                        Log4Func(("Expand-down data segment limit exceeded. "
6052 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6053 GCPtrLast32, pSel->u32Limit));
6054 if (iSegReg == X86_SREG_SS)
6055 hmR0VmxSetPendingXcptSS(pVCpu, 0);
6056 else
6057 hmR0VmxSetPendingXcptGP(pVCpu, 0);
6058 return VINF_HM_PENDING_XCPT;
6059 }
6060 }
6061 }
6062 else
6063 {
6064 /* Check permissions for the code segment. */
6065 if ( enmMemAccess == VMXMEMACCESS_WRITE
6066 || ( enmMemAccess == VMXMEMACCESS_READ
6067 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6068 {
6069 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6070 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6071 hmR0VmxSetPendingXcptGP(pVCpu, 0);
6072 return VINF_HM_PENDING_XCPT;
6073 }
6074
6075 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6076 if ( GCPtrFirst32 > pSel->u32Limit
6077 || GCPtrLast32 > pSel->u32Limit)
6078 {
6079 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6080 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6081 if (iSegReg == X86_SREG_SS)
6082 hmR0VmxSetPendingXcptSS(pVCpu, 0);
6083 else
6084 hmR0VmxSetPendingXcptGP(pVCpu, 0);
6085 return VINF_HM_PENDING_XCPT;
6086 }
6087 }
6088 }
6089 else
6090 {
6091 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6092 hmR0VmxSetPendingXcptGP(pVCpu, 0);
6093 return VINF_HM_PENDING_XCPT;
6094 }
6095
6096 *pGCPtrMem = GCPtrMem;
6097 return VINF_SUCCESS;
6098}
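/*
 * Example (illustrative): for a VMX instruction executed in 64-bit mode with a memory
 * operand of the form [rax + rbx*4 + disp32], the caller passes GCPtrDisp=disp32 and
 * this function adds RAX (base) and RBX << 2 (scaled index), adds the segment base
 * only for FS/GS, masks the sum with the 64-bit address-size mask and finally
 * performs the canonical-address check above.
 */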
6099
6100
6101/**
6102 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
6103 * guest attempting to execute a VMX instruction.
6104 *
6105 * @returns Strict VBox status code (i.e. informational status codes too).
6106 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6107 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6108 *
6109 * @param pVCpu The cross context virtual CPU structure.
6110 * @param uExitReason The VM-exit reason.
6111 *
6112 * @todo NstVmx: Document other error codes when VM-exit is implemented.
6113 * @remarks No-long-jump zone!!!
6114 */
6115static VBOXSTRICTRC hmR0VmxCheckExitDueToVmxInstr(PVMCPU pVCpu, uint32_t uExitReason)
6116{
6117 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6118 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6119
6120 if ( CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx)
6121 || ( CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6122 && !CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx)))
6123 {
6124 Log4Func(("In real/v86-mode or long-mode outside 64-bit code segment -> #UD\n"));
6125 hmR0VmxSetPendingXcptUD(pVCpu);
6126 return VINF_HM_PENDING_XCPT;
6127 }
6128
6129 if (uExitReason == VMX_EXIT_VMXON)
6130 {
6131 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6132
6133 /*
6134 * We check CR4.VMXE because it is required to be always set while in VMX operation
6135 * by physical CPUs and our CR4 read shadow is only consulted when executing specific
6136 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6137 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6138 */
6139 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6140 {
6141 Log4Func(("CR4.VMXE is not set -> #UD\n"));
6142 hmR0VmxSetPendingXcptUD(pVCpu);
6143 return VINF_HM_PENDING_XCPT;
6144 }
6145 }
6146 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
6147 {
6148 /*
6149 * The guest has not entered VMX operation but attempted to execute a VMX instruction
6150 * (other than VMXON), we need to raise a #UD.
6151 */
6152 Log4Func(("Not in VMX root mode -> #UD\n"));
6153 hmR0VmxSetPendingXcptUD(pVCpu);
6154 return VINF_HM_PENDING_XCPT;
6155 }
6156
6157 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
6158 {
6159 /*
6160 * The nested-guest attempted to execute a VMX instruction, cause a VM-exit and let
6161 * the guest hypervisor deal with it.
6162 */
6163 /** @todo NSTVMX: Trigger a VM-exit */
6164 }
6165
6166 /*
6167 * VMX instructions require CPL 0 except in VMX non-root mode where the VM-exit intercept
6168     * (above) takes precedence over the CPL check.
6169 */
6170 if (CPUMGetGuestCPL(pVCpu) > 0)
6171 {
6172 Log4Func(("CPL > 0 -> #GP(0)\n"));
6173 hmR0VmxSetPendingXcptGP(pVCpu, 0);
6174 return VINF_HM_PENDING_XCPT;
6175 }
6176
6177 return VINF_SUCCESS;
6178}
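/*
 * Summary of the ordering enforced above: #UD for real/v86 mode or for long mode
 * outside a 64-bit code segment, #UD when CR4.VMXE is clear (VMXON only) or when not
 * in VMX root operation (all other VMX instructions), a VM-exit to the guest
 * hypervisor when in VMX non-root mode, and finally #GP(0) when CPL > 0.
 */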
6179#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6180
6181
6182/**
6183 * Handle a condition that occurred while delivering an event through the guest
6184 * IDT.
6185 *
6186 * @returns Strict VBox status code (i.e. informational status codes too).
6187 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6188 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
6189 *         to continue execution of the guest which will deliver the \#DF.
6190 * @retval VINF_EM_RESET if we detected a triple-fault condition.
6191 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
6192 *
6193 * @param pVCpu The cross context virtual CPU structure.
6194 * @param pVmxTransient Pointer to the VMX transient structure.
6195 *
6196 * @remarks No-long-jump zone!!!
6197 */
6198static VBOXSTRICTRC hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
6199{
6200 uint32_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6201
6202 int rc2 = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
6203 rc2 |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
6204 AssertRCReturn(rc2, rc2);
6205
6206 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
6207 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
6208 {
6209 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
6210 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
6211
6212 /*
6213 * If the event was a software interrupt (generated with INT n) or a software exception
6214 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
6215 * can handle the VM-exit and continue guest execution which will re-execute the
6216 * instruction rather than re-injecting the exception, as that can cause premature
6217 * trips to ring-3 before injection and involve TRPM which currently has no way of
6218 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
6219 * the problem).
6220 */
6221 IEMXCPTRAISE enmRaise;
6222 IEMXCPTRAISEINFO fRaiseInfo;
6223 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6224 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6225 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6226 {
6227 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
6228 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6229 }
6230 else if (VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo))
6231 {
6232             uint32_t const uExitVectorType  = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
6233 uint32_t const fIdtVectorFlags = hmR0VmxGetIemXcptFlags(uIdtVector, uIdtVectorType);
6234 uint32_t const fExitVectorFlags = hmR0VmxGetIemXcptFlags(uExitVector, uExitVectorType);
6235 /** @todo Make AssertMsgReturn as just AssertMsg later. */
6236 AssertMsgReturn(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT,
6237 ("hmR0VmxCheckExitDueToEventDelivery: Unexpected VM-exit interruption info. %#x!\n",
6238 uExitVectorType), VERR_VMX_IPE_5);
6239
6240 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
6241
6242 /* Determine a vectoring #PF condition, see comment in hmR0VmxExitXcptPF(). */
6243 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
6244 {
6245 pVmxTransient->fVectoringPF = true;
6246 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6247 }
6248 }
6249 else
6250 {
6251 /*
6252 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
6253 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
6254 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
6255 */
6256 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6257 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6258 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
6259 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6260 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6261 }
6262
6263 /*
6264 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
6265 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
6266 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
6267 * subsequent VM-entry would fail.
6268 *
6269 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception". See @bugref{7445}.
6270 */
6271 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)
6272 && uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6273 && ( enmRaise == IEMXCPTRAISE_PREV_EVENT
6274 || (fRaiseInfo & IEMXCPTRAISEINFO_NMI_PF))
6275 && (pVCpu->hm.s.vmx.Ctls.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
6276 {
6277 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
6278 }
6279
6280 switch (enmRaise)
6281 {
6282 case IEMXCPTRAISE_CURRENT_XCPT:
6283 {
6284 Log4Func(("IDT: Pending secondary Xcpt: uIdtVectoringInfo=%#RX64 uExitIntInfo=%#RX64\n",
6285 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uExitIntInfo));
6286 Assert(rcStrict == VINF_SUCCESS);
6287 break;
6288 }
6289
6290 case IEMXCPTRAISE_PREV_EVENT:
6291 {
6292 uint32_t u32ErrCode;
6293 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
6294 {
6295 rc2 = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
6296 AssertRCReturn(rc2, rc2);
6297 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
6298 }
6299 else
6300 u32ErrCode = 0;
6301
6302 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see hmR0VmxExitXcptPF(). */
6303 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
6304 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
6305 0 /* cbInstr */, u32ErrCode, pVCpu->cpum.GstCtx.cr2);
6306
6307 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntInfo,
6308 pVCpu->hm.s.Event.u32ErrCode));
6309 Assert(rcStrict == VINF_SUCCESS);
6310 break;
6311 }
6312
6313 case IEMXCPTRAISE_REEXEC_INSTR:
6314 Assert(rcStrict == VINF_SUCCESS);
6315 break;
6316
6317 case IEMXCPTRAISE_DOUBLE_FAULT:
6318 {
6319 /*
6320              * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
6321 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
6322 */
6323 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6324 {
6325 pVmxTransient->fVectoringDoublePF = true;
6326 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo,
6327 pVCpu->cpum.GstCtx.cr2));
6328 rcStrict = VINF_SUCCESS;
6329 }
6330 else
6331 {
6332 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
6333 hmR0VmxSetPendingXcptDF(pVCpu);
6334 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntInfo,
6335 uIdtVector, uExitVector));
6336 rcStrict = VINF_HM_DOUBLE_FAULT;
6337 }
6338 break;
6339 }
6340
6341 case IEMXCPTRAISE_TRIPLE_FAULT:
6342 {
6343 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector));
6344 rcStrict = VINF_EM_RESET;
6345 break;
6346 }
6347
6348 case IEMXCPTRAISE_CPU_HANG:
6349 {
6350 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
6351 rcStrict = VERR_EM_GUEST_CPU_HANG;
6352 break;
6353 }
6354
6355 default:
6356 {
6357 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
6358 rcStrict = VERR_VMX_IPE_2;
6359 break;
6360 }
6361 }
6362 }
6363 else if ( VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo)
6364 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitIntInfo)
6365 && uExitVector != X86_XCPT_DF
6366 && (pVCpu->hm.s.vmx.Ctls.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
6367 {
6368 /*
6369 * Execution of IRET caused this fault when NMI blocking was in effect (i.e we're in the guest NMI handler).
6370 * We need to set the block-by-NMI field so that NMIs remain blocked until the IRET execution is restarted.
6371 * See Intel spec. 30.7.1.2 "Resuming guest software after handling an exception".
6372 */
6373 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
6374 {
6375 Log4Func(("Setting VMCPU_FF_BLOCK_NMIS. fValid=%RTbool uExitReason=%u\n",
6376 VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo), pVmxTransient->uExitReason));
6377 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
6378 }
6379 }
6380
6381 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
6382 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
6383 return rcStrict;
6384}
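/*
 * Outcome summary: IEMXCPTRAISE_REEXEC_INSTR and IEMXCPTRAISE_CURRENT_XCPT leave
 * rcStrict at VINF_SUCCESS, IEMXCPTRAISE_PREV_EVENT re-queues the original event via
 * hmR0VmxSetPendingEvent(), a double fault either flags a vectoring double #PF or
 * returns VINF_HM_DOUBLE_FAULT, a triple fault returns VINF_EM_RESET, and
 * IEMXCPTRAISE_CPU_HANG returns VERR_EM_GUEST_CPU_HANG.
 */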
6385
6386
6387/**
6388 * Imports a guest segment register from the current VMCS into
6389 * the guest-CPU context.
6390 *
6391 * @returns VBox status code.
6392 * @param pVCpu The cross context virtual CPU structure.
6393 * @param idxSel Index of the selector in the VMCS.
6394 * @param idxLimit Index of the segment limit in the VMCS.
6395 * @param idxBase Index of the segment base in the VMCS.
6396 * @param idxAccess Index of the access rights of the segment in the VMCS.
6397 * @param pSelReg Pointer to the segment selector.
6398 *
6399 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
6400 * do not log!
6401 *
6402 * @remarks Never call this function directly!!! Use the
6403 * HMVMX_IMPORT_SREG() macro as that takes care
6404 * of whether to read from the VMCS cache or not.
6405 */
6406static int hmR0VmxImportGuestSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
6407 PCPUMSELREG pSelReg)
6408{
6409 NOREF(pVCpu);
6410
6411 uint32_t u32Sel;
6412 uint32_t u32Limit;
6413 uint32_t u32Attr;
6414 uint64_t u64Base;
6415 int rc = VMXReadVmcs32(idxSel, &u32Sel);
6416 rc |= VMXReadVmcs32(idxLimit, &u32Limit);
6417 rc |= VMXReadVmcs32(idxAccess, &u32Attr);
6418 rc |= VMXReadVmcsGstNByIdxVal(idxBase, &u64Base);
6419 AssertRCReturn(rc, rc);
6420
6421 pSelReg->Sel = (uint16_t)u32Sel;
6422 pSelReg->ValidSel = (uint16_t)u32Sel;
6423 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6424 pSelReg->u32Limit = u32Limit;
6425 pSelReg->u64Base = u64Base;
6426 pSelReg->Attr.u = u32Attr;
6427
6428 /*
6429 * If VT-x marks the segment as unusable, most other bits remain undefined:
6430 * - For CS the L, D and G bits have meaning.
6431 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
6432 * - For the remaining data segments no bits are defined.
6433 *
6434     * The present bit and the unusable bit have been observed to be set at the
6435 * same time (the selector was supposed to be invalid as we started executing
6436 * a V8086 interrupt in ring-0).
6437 *
6438 * What should be important for the rest of the VBox code, is that the P bit is
6439 * cleared. Some of the other VBox code recognizes the unusable bit, but
6440     * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
6441 * safe side here, we'll strip off P and other bits we don't care about. If
6442 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
6443 *
6444 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
6445 */
6446 if (pSelReg->Attr.u & X86DESCATTR_UNUSABLE)
6447 {
6448 Assert(idxSel != VMX_VMCS16_GUEST_TR_SEL); /* TR is the only selector that can never be unusable. */
6449
6450 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
6451 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
6452 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
6453#ifdef VBOX_STRICT
6454 VMMRZCallRing3Disable(pVCpu);
6455 Log4Func(("Unusable idxSel=%#x attr=%#x -> %#x\n", idxSel, u32Sel, pSelReg->Attr.u));
6456# ifdef DEBUG_bird
6457 AssertMsg((u32Attr & ~X86DESCATTR_P) == pSelReg->Attr.u,
6458 ("%#x: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
6459 idxSel, u32Sel, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
6460# endif
6461 VMMRZCallRing3Enable(pVCpu);
6462#endif
6463 }
6464 return VINF_SUCCESS;
6465}
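/* Reminder: callers use the HMVMX_IMPORT_SREG() macro rather than calling this
   directly, e.g. HMVMX_IMPORT_SREG(CS, &pCtx->cs) in hmR0VmxImportGuestState(). */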
6466
6467
6468/**
6469 * Imports the guest RIP from the VMCS back into the guest-CPU context.
6470 *
6471 * @returns VBox status code.
6472 * @param pVCpu The cross context virtual CPU structure.
6473 *
6474 * @remarks Called with interrupts and/or preemption disabled, should not assert!
6475 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState()
6476 * instead!!!
6477 */
6478DECLINLINE(int) hmR0VmxImportGuestRip(PVMCPU pVCpu)
6479{
6480 uint64_t u64Val;
6481 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6482 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
6483 {
6484 int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);
6485 if (RT_SUCCESS(rc))
6486 {
6487 pCtx->rip = u64Val;
6488 EMR0HistoryUpdatePC(pVCpu, pCtx->rip, false);
6489 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
6490 }
6491 return rc;
6492 }
6493 return VINF_SUCCESS;
6494}
6495
6496
6497/**
6498 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
6499 *
6500 * @returns VBox status code.
6501 * @param pVCpu The cross context virtual CPU structure.
6502 *
6503 * @remarks Called with interrupts and/or preemption disabled, should not assert!
6504 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState()
6505 * instead!!!
6506 */
6507DECLINLINE(int) hmR0VmxImportGuestRFlags(PVMCPU pVCpu)
6508{
6509 uint32_t u32Val;
6510 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6511 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
6512 {
6513 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val);
6514 if (RT_SUCCESS(rc))
6515 {
6516 pCtx->eflags.u32 = u32Val;
6517
6518 /* Restore eflags for real-on-v86-mode hack. */
6519 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6520 {
6521 pCtx->eflags.Bits.u1VM = 0;
6522 pCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL;
6523 }
6524 }
6525 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
6526 return rc;
6527 }
6528 return VINF_SUCCESS;
6529}
6530
6531
6532/**
6533 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
6534 * context.
6535 *
6536 * @returns VBox status code.
6537 * @param pVCpu The cross context virtual CPU structure.
6538 *
6539 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
6540 * do not log!
6541 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState()
6542 * instead!!!
6543 */
6544DECLINLINE(int) hmR0VmxImportGuestIntrState(PVMCPU pVCpu)
6545{
6546 uint32_t u32Val;
6547 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6548 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &u32Val);
6549 AssertRCReturn(rc, rc);
6550
6551 /*
6552 * We additionally have a requirement to import RIP, RFLAGS depending on whether we
6553     * We additionally have a requirement to import RIP and RFLAGS depending on whether
6554     * we might need them while evaluating pending events before VM-entry.
6555 if (!u32Val)
6556 {
6557 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
6558 {
6559 rc = hmR0VmxImportGuestRip(pVCpu);
6560 rc |= hmR0VmxImportGuestRFlags(pVCpu);
6561 AssertRCReturn(rc, rc);
6562 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6563 }
6564
6565 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
6566 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
6567 }
6568 else
6569 {
6570 rc = hmR0VmxImportGuestRip(pVCpu);
6571 rc |= hmR0VmxImportGuestRFlags(pVCpu);
6572 AssertRCReturn(rc, rc);
6573
6574 if (u32Val & ( VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
6575 | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
6576 {
6577 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
6578 }
6579 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
6580 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6581
6582 if (u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
6583 {
6584 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
6585 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
6586 }
6587 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
6588 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
6589 }
6590
6591 return VINF_SUCCESS;
6592}
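/*
 * To recap the mapping above: MOV-SS/STI blocking records the inhibition at the
 * current RIP via EMSetInhibitInterruptsPC(), NMI blocking is mirrored with
 * VMCPU_FF_BLOCK_NMIS, and a zero interruptibility-state clears both force-flags.
 */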
6593
6594
6595/**
6596 * Worker for VMXR0ImportStateOnDemand.
6597 *
6598 * @returns VBox status code.
6599 * @param pVCpu The cross context virtual CPU structure.
6600 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
6601 */
6602static int hmR0VmxImportGuestState(PVMCPU pVCpu, uint64_t fWhat)
6603{
6604#define VMXLOCAL_BREAK_RC(a_rc) \
6605 if (RT_FAILURE(a_rc)) \
6606 break
6607
6608 int rc = VINF_SUCCESS;
6609 PVM pVM = pVCpu->CTX_SUFF(pVM);
6610 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6611 uint64_t u64Val;
6612 uint32_t u32Val;
6613
6614 Log4Func(("fExtrn=%#RX64 fWhat=%#RX64\n", pCtx->fExtrn, fWhat));
6615 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatImportGuestState, x);
6616
6617 /*
6618 * We disable interrupts to make the updating of the state and in particular
6619     * the fExtrn modification atomic with respect to preemption hooks.
6620 */
6621 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
6622
6623 fWhat &= pCtx->fExtrn;
6624 if (fWhat)
6625 {
6626 do
6627 {
6628 if (fWhat & CPUMCTX_EXTRN_RIP)
6629 {
6630 rc = hmR0VmxImportGuestRip(pVCpu);
6631 VMXLOCAL_BREAK_RC(rc);
6632 }
6633
6634 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
6635 {
6636 rc = hmR0VmxImportGuestRFlags(pVCpu);
6637 VMXLOCAL_BREAK_RC(rc);
6638 }
6639
6640 if (fWhat & CPUMCTX_EXTRN_HM_VMX_INT_STATE)
6641 {
6642 rc = hmR0VmxImportGuestIntrState(pVCpu);
6643 VMXLOCAL_BREAK_RC(rc);
6644 }
6645
6646 if (fWhat & CPUMCTX_EXTRN_RSP)
6647 {
6648 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);
6649 VMXLOCAL_BREAK_RC(rc);
6650 pCtx->rsp = u64Val;
6651 }
6652
6653 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
6654 {
6655 if (fWhat & CPUMCTX_EXTRN_CS)
6656 {
6657 rc = HMVMX_IMPORT_SREG(CS, &pCtx->cs);
6658 rc |= hmR0VmxImportGuestRip(pVCpu);
6659 VMXLOCAL_BREAK_RC(rc);
6660 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6661 pCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u;
6662 EMR0HistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true);
6663 }
6664 if (fWhat & CPUMCTX_EXTRN_SS)
6665 {
6666 rc = HMVMX_IMPORT_SREG(SS, &pCtx->ss);
6667 VMXLOCAL_BREAK_RC(rc);
6668 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6669 pCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u;
6670 }
6671 if (fWhat & CPUMCTX_EXTRN_DS)
6672 {
6673 rc = HMVMX_IMPORT_SREG(DS, &pCtx->ds);
6674 VMXLOCAL_BREAK_RC(rc);
6675 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6676 pCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u;
6677 }
6678 if (fWhat & CPUMCTX_EXTRN_ES)
6679 {
6680 rc = HMVMX_IMPORT_SREG(ES, &pCtx->es);
6681 VMXLOCAL_BREAK_RC(rc);
6682 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6683 pCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u;
6684 }
6685 if (fWhat & CPUMCTX_EXTRN_FS)
6686 {
6687 rc = HMVMX_IMPORT_SREG(FS, &pCtx->fs);
6688 VMXLOCAL_BREAK_RC(rc);
6689 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6690 pCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u;
6691 }
6692 if (fWhat & CPUMCTX_EXTRN_GS)
6693 {
6694 rc = HMVMX_IMPORT_SREG(GS, &pCtx->gs);
6695 VMXLOCAL_BREAK_RC(rc);
6696 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6697 pCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u;
6698 }
6699 }
6700
6701 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
6702 {
6703 if (fWhat & CPUMCTX_EXTRN_LDTR)
6704 {
6705 rc = HMVMX_IMPORT_SREG(LDTR, &pCtx->ldtr);
6706 VMXLOCAL_BREAK_RC(rc);
6707 }
6708
6709 if (fWhat & CPUMCTX_EXTRN_GDTR)
6710 {
6711 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
6712 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
6713 VMXLOCAL_BREAK_RC(rc);
6714 pCtx->gdtr.pGdt = u64Val;
6715 pCtx->gdtr.cbGdt = u32Val;
6716 }
6717
6718 /* Guest IDTR. */
6719 if (fWhat & CPUMCTX_EXTRN_IDTR)
6720 {
6721 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
6722 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
6723 VMXLOCAL_BREAK_RC(rc);
6724 pCtx->idtr.pIdt = u64Val;
6725 pCtx->idtr.cbIdt = u32Val;
6726 }
6727
6728 /* Guest TR. */
6729 if (fWhat & CPUMCTX_EXTRN_TR)
6730 {
6731 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR, don't save that one. */
6732 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6733 {
6734 rc = HMVMX_IMPORT_SREG(TR, &pCtx->tr);
6735 VMXLOCAL_BREAK_RC(rc);
6736 }
6737 }
6738 }
6739
6740 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
6741 {
6742 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip);
6743 rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp);
6744 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val);
6745 pCtx->SysEnter.cs = u32Val;
6746 VMXLOCAL_BREAK_RC(rc);
6747 }
6748
6749#if HC_ARCH_BITS == 64
6750 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
6751 {
6752 if ( pVM->hm.s.fAllow64BitGuests
6753 && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
6754 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
6755 }
6756
6757 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
6758 {
6759 if ( pVM->hm.s.fAllow64BitGuests
6760 && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
6761 {
6762 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
6763 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
6764 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
6765 }
6766 }
6767#endif
6768
6769 if ( (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
6770#if HC_ARCH_BITS == 32
6771 || (fWhat & (CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS))
6772#endif
6773 )
6774 {
6775 PCVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
6776 uint32_t const cMsrs = pVCpu->hm.s.vmx.cMsrs;
6777 for (uint32_t i = 0; i < cMsrs; i++, pMsr++)
6778 {
6779 switch (pMsr->u32Msr)
6780 {
6781#if HC_ARCH_BITS == 32
6782 case MSR_K8_LSTAR: pCtx->msrLSTAR = pMsr->u64Value; break;
6783 case MSR_K6_STAR: pCtx->msrSTAR = pMsr->u64Value; break;
6784 case MSR_K8_SF_MASK: pCtx->msrSFMASK = pMsr->u64Value; break;
6785 case MSR_K8_KERNEL_GS_BASE: pCtx->msrKERNELGSBASE = pMsr->u64Value; break;
6786#endif
6787 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsr->u64Value); break;
6788 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsr->u64Value); break;
6789 case MSR_K6_EFER: /* EFER can't be changed without causing a VM-exit */ break;
6790 default:
6791 {
6792 pVCpu->hm.s.u32HMError = pMsr->u32Msr;
6793 ASMSetFlags(fEFlags);
6794 AssertMsgFailed(("Unexpected MSR in auto-load/store area. uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr,
6795 cMsrs));
6796 return VERR_HM_UNEXPECTED_LD_ST_MSR;
6797 }
6798 }
6799 }
6800 }
6801
6802 if (fWhat & CPUMCTX_EXTRN_DR7)
6803 {
6804 if (!pVCpu->hm.s.fUsingHyperDR7)
6805 {
6806 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
6807 rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val);
6808 VMXLOCAL_BREAK_RC(rc);
6809 pCtx->dr[7] = u32Val;
6810 }
6811 }
6812
6813 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
6814 {
6815 uint32_t u32Shadow;
6816 if (fWhat & CPUMCTX_EXTRN_CR0)
6817 {
6818 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val);
6819 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u32Shadow);
6820 VMXLOCAL_BREAK_RC(rc);
6821 u32Val = (u32Val & ~pVCpu->hm.s.vmx.Ctls.u32Cr0Mask)
6822 | (u32Shadow & pVCpu->hm.s.vmx.Ctls.u32Cr0Mask);
6823 VMMRZCallRing3Disable(pVCpu); /* Calls into PGM which has Log statements. */
6824 CPUMSetGuestCR0(pVCpu, u32Val);
6825 VMMRZCallRing3Enable(pVCpu);
6826 }
6827
6828 if (fWhat & CPUMCTX_EXTRN_CR4)
6829 {
6830 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32Val);
6831 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u32Shadow);
6832 VMXLOCAL_BREAK_RC(rc);
6833 u32Val = (u32Val & ~pVCpu->hm.s.vmx.Ctls.u32Cr4Mask)
6834 | (u32Shadow & pVCpu->hm.s.vmx.Ctls.u32Cr4Mask);
6835 CPUMSetGuestCR4(pVCpu, u32Val);
6836 }
6837
6838 if (fWhat & CPUMCTX_EXTRN_CR3)
6839 {
6840 /* CR0.PG bit changes are always intercepted, so it's up to date. */
6841 if ( pVM->hm.s.vmx.fUnrestrictedGuest
6842 || ( pVM->hm.s.fNestedPaging
6843 && CPUMIsGuestPagingEnabledEx(pCtx)))
6844 {
6845 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);
6846 if (pCtx->cr3 != u64Val)
6847 {
6848 CPUMSetGuestCR3(pVCpu, u64Val);
6849 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
6850 }
6851
6852 /* If the guest is in PAE mode, sync back the PDPE's into the guest state.
6853 Note: CR4.PAE, CR0.PG, EFER bit changes are always intercepted, so they're up to date. */
6854 if (CPUMIsGuestInPAEModeEx(pCtx))
6855 {
6856 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);
6857 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);
6858 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);
6859 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);
6860 VMXLOCAL_BREAK_RC(rc);
6861 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
6862 }
6863 }
6864 }
6865 }
6866 } while (0);
6867
6868 if (RT_SUCCESS(rc))
6869 {
6870 /* Update fExtrn. */
6871 pCtx->fExtrn &= ~fWhat;
6872
6873 /* If everything has been imported, clear the HM keeper bit. */
6874 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
6875 {
6876 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
6877 Assert(!pCtx->fExtrn);
6878 }
6879 }
6880 }
6881 else
6882 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
6883
6884 ASMSetFlags(fEFlags);
6885
6886 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatImportGuestState, x);
6887
6888 /*
6889 * Honor any pending CR3 updates.
6890 *
6891 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback()
6892 * -> VMMRZCallRing3Disable() -> hmR0VmxImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
6893 * -> continue with VM-exit handling -> hmR0VmxImportGuestState() and here we are.
6894 *
6895 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
6896 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
6897 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
6898 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
6899 *
6900 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
6901 */
6902 if (VMMRZCallRing3IsEnabled(pVCpu))
6903 {
6904 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
6905 {
6906 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
6907 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
6908 }
6909
6910 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
6911 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6912
6913 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6914 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6915 }
6916
6917 return VINF_SUCCESS;
6918#undef VMXLOCAL_BREAK_RC
6919}
6920
6921
6922/**
6923 * Imports the guest state from the VMCS into the guest-CPU context on demand.
6924 *
6925 * @returns VBox status code.
6926 * @param pVCpu The cross context virtual CPU structure.
6927 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
6928 */
6929VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat)
6930{
6931 return hmR0VmxImportGuestState(pVCpu, fWhat);
6932}
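/*
 * Usage sketch (illustrative): a VM-exit handler that only needs the instruction
 * pointer and flags could call
 *     VMXR0ImportStateOnDemand(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
 * leaving the remaining CPUMCTX_EXTRN_XXX bits set in fExtrn for later, on-demand
 * import.
 */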
6933
6934
6935/**
6936 * Check per-VM and per-VCPU force flag actions that require us to go back to
6937 * ring-3 for one reason or another.
6938 *
6939 * @returns Strict VBox status code (i.e. informational status codes too)
6940 * @retval VINF_SUCCESS if we don't have any actions that require going back to
6941 * ring-3.
6942 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
6943 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
6944 * interrupts)
6945 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
6946 * all EMTs to be in ring-3.
6947 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
6948 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
6949 * to the EM loop.
6950 *
6951 * @param pVCpu The cross context virtual CPU structure.
6952 * @param fStepping Running in hmR0VmxRunGuestCodeStep().
6953 */
6954static VBOXSTRICTRC hmR0VmxCheckForceFlags(PVMCPU pVCpu, bool fStepping)
6955{
6956 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6957
6958 /*
6959 * Update pending interrupts into the APIC's IRR.
6960 */
6961 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
6962 APICUpdatePendingInterrupts(pVCpu);
6963
6964 /*
6965 * Anything pending? Should be more likely than not if we're doing a good job.
6966 */
6967 PVM pVM = pVCpu->CTX_SUFF(pVM);
6968 if ( !fStepping
6969 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
6970 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
6971 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
6972 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
6973 return VINF_SUCCESS;
6974
6975    /* Pending PGM CR3 sync. */
6976    if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
6977 {
6978 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6979 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
6980 VBOXSTRICTRC rcStrict2 = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
6981 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
6982 if (rcStrict2 != VINF_SUCCESS)
6983 {
6984 AssertRC(VBOXSTRICTRC_VAL(rcStrict2));
6985 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict2)));
6986 return rcStrict2;
6987 }
6988 }
6989
6990 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
6991 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
6992 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
6993 {
6994 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
6995 int rc2 = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
6996 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2));
6997 return rc2;
6998 }
6999
7000 /* Pending VM request packets, such as hardware interrupts. */
7001 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
7002 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
7003 {
7004 Log4Func(("Pending VM request forcing us back to ring-3\n"));
7005 return VINF_EM_PENDING_REQUEST;
7006 }
7007
7008 /* Pending PGM pool flushes. */
7009 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
7010 {
7011 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
7012 return VINF_PGM_POOL_FLUSH_PENDING;
7013 }
7014
7015 /* Pending DMA requests. */
7016 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
7017 {
7018 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
7019 return VINF_EM_RAW_TO_R3;
7020 }
7021
7022 return VINF_SUCCESS;
7023}
7024
7025
7026/**
7027 * Converts any TRPM trap into a pending HM event. This is typically used when
7028 * entering from ring-3 (not longjmp returns).
7029 *
7030 * @param pVCpu The cross context virtual CPU structure.
7031 */
7032static void hmR0VmxTrpmTrapToPendingEvent(PVMCPU pVCpu)
7033{
7034 Assert(TRPMHasTrap(pVCpu));
7035 Assert(!pVCpu->hm.s.Event.fPending);
7036
7037 uint8_t uVector;
7038 TRPMEVENT enmTrpmEvent;
7039 RTGCUINT uErrCode;
7040 RTGCUINTPTR GCPtrFaultAddress;
7041 uint8_t cbInstr;
7042
7043 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
7044 AssertRC(rc);
7045
7046    /* See Intel spec. 24.8.3 "VM-entry Controls for Event Injection" for the format of u32IntInfo. */
7047 uint32_t u32IntInfo = uVector | VMX_EXIT_INT_INFO_VALID;
7048 if (enmTrpmEvent == TRPM_TRAP)
7049 {
7050 switch (uVector)
7051 {
7052 case X86_XCPT_NMI:
7053 u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_NMI << VMX_EXIT_INT_INFO_TYPE_SHIFT);
7054 break;
7055
7056 case X86_XCPT_BP:
7057 case X86_XCPT_OF:
7058 u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_SW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
7059 break;
7060
7061 case X86_XCPT_PF:
7062 case X86_XCPT_DF:
7063 case X86_XCPT_TS:
7064 case X86_XCPT_NP:
7065 case X86_XCPT_SS:
7066 case X86_XCPT_GP:
7067 case X86_XCPT_AC:
7068 u32IntInfo |= VMX_EXIT_INT_INFO_ERROR_CODE_VALID;
7069 RT_FALL_THRU();
7070 default:
7071 u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_HW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
7072 break;
7073 }
7074 }
7075 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
7076 u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_EXT_INT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
7077 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
7078 u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_SW_INT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
7079 else
7080 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
7081
7082 rc = TRPMResetTrap(pVCpu);
7083 AssertRC(rc);
7084 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
7085 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
7086
7087 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
7088}
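/*
 * Example (illustrative): a TRPM_TRAP with uVector=X86_XCPT_PF arrives here with an
 * error code and a fault address; the resulting u32IntInfo carries the hardware
 * exception type and the error-code-valid bit, and GCPtrFaultAddress preserves CR2
 * until the event is actually injected.
 */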
7089
7090
7091/**
7092 * Converts the pending HM event into a TRPM trap.
7093 *
7094 * @param pVCpu The cross context virtual CPU structure.
7095 */
7096static void hmR0VmxPendingEventToTrpmTrap(PVMCPU pVCpu)
7097{
7098 Assert(pVCpu->hm.s.Event.fPending);
7099
7100 uint32_t uVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
7101 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo);
7102 bool fErrorCodeValid = VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVCpu->hm.s.Event.u64IntInfo);
7103 uint32_t uErrorCode = pVCpu->hm.s.Event.u32ErrCode;
7104
7105 /* If a trap was already pending, we did something wrong! */
7106 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
7107
7108 TRPMEVENT enmTrapType;
7109 switch (uVectorType)
7110 {
7111 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
7112 enmTrapType = TRPM_HARDWARE_INT;
7113 break;
7114
7115 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
7116 enmTrapType = TRPM_SOFTWARE_INT;
7117 break;
7118
7119 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
7120 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
7121 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP and #OF */
7122 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
7123 enmTrapType = TRPM_TRAP;
7124 break;
7125
7126 default:
7127 AssertMsgFailed(("Invalid trap type %#x\n", uVectorType));
7128 enmTrapType = TRPM_32BIT_HACK;
7129 break;
7130 }
7131
7132 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
7133
7134 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
7135 AssertRC(rc);
7136
7137 if (fErrorCodeValid)
7138 TRPMSetErrorCode(pVCpu, uErrorCode);
7139
7140 if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
7141 && uVector == X86_XCPT_PF)
7142 {
7143 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
7144 }
7145 else if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
7146 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
7147 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
7148 {
7149 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
7150 || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
7151 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
7152 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
7153 }
7154
7155 /* Clear the events from the VMCS. */
7156 VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0);
7157
7158 /* We're now done converting the pending event. */
7159 pVCpu->hm.s.Event.fPending = false;
7160}
7161
7162
7163/**
7164 * Does the necessary state syncing before returning to ring-3 for any reason
7165 * (longjmp, preemption, voluntary exits to ring-3) from VT-x.
7166 *
7167 * @returns VBox status code.
7168 * @param pVCpu The cross context virtual CPU structure.
7169 * @param fImportState Whether to import the guest state from the VMCS back
7170 * to the guest-CPU context.
7171 *
7172 * @remarks No-long-jmp zone!!!
7173 */
7174static int hmR0VmxLeave(PVMCPU pVCpu, bool fImportState)
7175{
7176 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7177 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
7178
7179 RTCPUID idCpu = RTMpCpuId();
7180 Log4Func(("HostCpuId=%u\n", idCpu));
7181
7182 /*
7183 * !!! IMPORTANT !!!
7184 * If you modify code here, check whether hmR0VmxCallRing3Callback() needs to be updated too.
7185 */
7186
7187 /* Save the guest state if necessary. */
7188 if (fImportState)
7189 {
7190 int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
7191 AssertRCReturn(rc, rc);
7192 }
7193
7194 /* Restore host FPU state if necessary. We will resync on next R0 reentry. */
7195 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
7196 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
7197
7198 /* Restore host debug registers if necessary. We will resync on next R0 reentry. */
7199#ifdef VBOX_STRICT
7200 if (CPUMIsHyperDebugStateActive(pVCpu))
7201 Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT);
7202#endif
7203 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
7204 Assert(!CPUMIsGuestDebugStateActive(pVCpu) && !CPUMIsGuestDebugStateActivePending(pVCpu));
7205 Assert(!CPUMIsHyperDebugStateActive(pVCpu) && !CPUMIsHyperDebugStateActivePending(pVCpu));
7206
7207#if HC_ARCH_BITS == 64
7208 /* Restore host-state bits that VT-x only restores partially. */
7209 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
7210 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
7211 {
7212 Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags, idCpu));
7213 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
7214 }
7215 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
7216#endif
7217
7218 /* Restore the lazy host MSRs as we're leaving VT-x context. */
7219 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
7220 {
7221 /* We shouldn't restore the host MSRs without saving the guest MSRs first. */
7222 if (!fImportState)
7223 {
7224 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS);
7225 AssertRCReturn(rc, rc);
7226 }
7227 hmR0VmxLazyRestoreHostMsrs(pVCpu);
7228 Assert(!pVCpu->hm.s.vmx.fLazyMsrs);
7229 }
7230 else
7231 pVCpu->hm.s.vmx.fLazyMsrs = 0;
7232
7233 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
7234 pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
7235
7236 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
7237 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatImportGuestState);
7238 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExportGuestState);
7239 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatPreExit);
7240 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitHandling);
7241 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitIO);
7242 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitMovCRx);
7243 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitXcptNmi);
7244 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
7245
7246 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
7247
7248 /** @todo This partially defeats the purpose of having preemption hooks.
7249     * The problem is that deregistering the hooks should be done somewhere that
7250     * lasts until the EMT is about to be destroyed, not every time we leave HM
7251     * context.
7252 */
7253 if (pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_ACTIVE)
7254 {
7255 int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
7256 AssertRCReturn(rc, rc);
7257
7258 pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_CLEAR;
7259 Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
7260 }
7261 Assert(!(pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_LAUNCHED));
7262 NOREF(idCpu);
7263
7264 return VINF_SUCCESS;
7265}
7266
7267
7268/**
7269 * Leaves the VT-x session.
7270 *
7271 * @returns VBox status code.
7272 * @param pVCpu The cross context virtual CPU structure.
7273 *
7274 * @remarks No-long-jmp zone!!!
7275 */
7276static int hmR0VmxLeaveSession(PVMCPU pVCpu)
7277{
7278 HM_DISABLE_PREEMPT(pVCpu);
7279 HMVMX_ASSERT_CPU_SAFE(pVCpu);
7280 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
7281 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7282
7283 /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
7284 and done this from the VMXR0ThreadCtxCallback(). */
7285 if (!pVCpu->hm.s.fLeaveDone)
7286 {
7287 int rc2 = hmR0VmxLeave(pVCpu, true /* fImportState */);
7288 AssertRCReturnStmt(rc2, HM_RESTORE_PREEMPT(), rc2);
7289 pVCpu->hm.s.fLeaveDone = true;
7290 }
7291 Assert(!pVCpu->cpum.GstCtx.fExtrn);
7292
7293 /*
7294 * !!! IMPORTANT !!!
7295 * If you modify code here, make sure to check whether hmR0VmxCallRing3Callback() needs to be updated too.
7296 */
7297
7298 /* Deregister hook now that we've left HM context before re-enabling preemption. */
7299 /** @todo Deregistering here means we need to VMCLEAR always
7300 * (longjmp/exit-to-r3) in VT-x which is not efficient, eliminate need
7301 * for calling VMMR0ThreadCtxHookDisable here! */
7302 VMMR0ThreadCtxHookDisable(pVCpu);
7303
7304 /* Leave HM context. This takes care of local init (term). */
7305 int rc = HMR0LeaveCpu(pVCpu);
7306
7307 HM_RESTORE_PREEMPT();
7308 return rc;
7309}
7310
7311
7312/**
7313 * Does the necessary state syncing before doing a longjmp to ring-3.
7314 *
7315 * @returns VBox status code.
7316 * @param pVCpu The cross context virtual CPU structure.
7317 *
7318 * @remarks No-long-jmp zone!!!
7319 */
7320DECLINLINE(int) hmR0VmxLongJmpToRing3(PVMCPU pVCpu)
7321{
7322 return hmR0VmxLeaveSession(pVCpu);
7323}
7324
7325
7326/**
7327 * Take necessary actions before going back to ring-3.
7328 *
7329 * An action requires us to go back to ring-3. This function does the necessary
7330 * steps before we can safely return to ring-3. This is not the same as longjmps
7331 * to ring-3; this is voluntary and prepares the guest so it may continue
7332 * executing outside HM (recompiler/IEM).
7333 *
7334 * @returns VBox status code.
7335 * @param pVCpu The cross context virtual CPU structure.
7336 * @param rcExit The reason for exiting to ring-3. Can be
7337 * VINF_VMM_UNKNOWN_RING3_CALL.
7338 */
7339static int hmR0VmxExitToRing3(PVMCPU pVCpu, VBOXSTRICTRC rcExit)
7340{
7341 Assert(pVCpu);
7342 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
7343
7344 if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
7345 {
7346 VMXGetActivatedVmcs(&pVCpu->hm.s.vmx.LastError.u64VmcsPhys);
7347 pVCpu->hm.s.vmx.LastError.u32VmcsRev = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs;
7348 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
7349 /* LastError.idCurrentCpu was updated in hmR0VmxPreRunGuestCommitted(). */
7350 }
7351
7352    /* Please, no longjmps here (a log flush could jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
7353 VMMRZCallRing3Disable(pVCpu);
7354 Log4Func(("rcExit=%d\n", VBOXSTRICTRC_VAL(rcExit)));
7355
7356 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
7357 if (pVCpu->hm.s.Event.fPending)
7358 {
7359 hmR0VmxPendingEventToTrpmTrap(pVCpu);
7360 Assert(!pVCpu->hm.s.Event.fPending);
7361 }
7362
7363    /* Clear interrupt-window and NMI-window controls as we re-evaluate them when we return from ring-3. */
7364 hmR0VmxClearIntNmiWindowsVmcs(pVCpu);
7365
7366 /* If we're emulating an instruction, we shouldn't have any TRPM traps pending
7367 and if we're injecting an event we should have a TRPM trap pending. */
7368 AssertMsg(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit)));
7369#ifndef DEBUG_bird /* Triggered after firing an NMI against NT4SP1, possibly a triple fault in progress. */
7370 AssertMsg(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit)));
7371#endif
7372
7373 /* Save guest state and restore host state bits. */
7374 int rc = hmR0VmxLeaveSession(pVCpu);
7375 AssertRCReturn(rc, rc);
7376 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
7377 /* Thread-context hooks are unregistered at this point!!! */
7378
7379 /* Sync recompiler state. */
7380 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
7381 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
7382 | CPUM_CHANGED_LDTR
7383 | CPUM_CHANGED_GDTR
7384 | CPUM_CHANGED_IDTR
7385 | CPUM_CHANGED_TR
7386 | CPUM_CHANGED_HIDDEN_SEL_REGS);
7387 if ( pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging
7388 && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx))
7389 {
7390 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
7391 }
7392
7393 Assert(!pVCpu->hm.s.fClearTrapFlag);
7394
7395    /* Update the exit-to-ring-3 reason. */
7396 pVCpu->hm.s.rcLastExitToR3 = VBOXSTRICTRC_VAL(rcExit);
7397
7398 /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
7399 if (rcExit != VINF_EM_RAW_INTERRUPT)
7400 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
7401
7402 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
7403
7404 /* We do -not- want any longjmp notifications after this! We must return to ring-3 ASAP. */
7405 VMMRZCallRing3RemoveNotification(pVCpu);
7406 VMMRZCallRing3Enable(pVCpu);
7407
7408 return rc;
7409}
7410
7411
7412/**
7413 * VMMRZCallRing3() callback wrapper which saves the guest state before we
7414 * longjump to ring-3 and possibly get preempted.
7415 *
7416 * @returns VBox status code.
7417 * @param pVCpu The cross context virtual CPU structure.
7418 * @param enmOperation The operation causing the ring-3 longjump.
7419 * @param pvUser User argument, currently unused, NULL.
7420 */
7421static DECLCALLBACK(int) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
7422{
7423 RT_NOREF(pvUser);
7424 if (enmOperation == VMMCALLRING3_VM_R0_ASSERTION)
7425 {
7426 /*
7427 * !!! IMPORTANT !!!
7428 * If you modify code here, check whether hmR0VmxLeave() and hmR0VmxLeaveSession() need to be updated too.
7429 * This is a stripped down version which gets out ASAP, trying to not trigger any further assertions.
7430 */
7431 VMMRZCallRing3RemoveNotification(pVCpu);
7432 VMMRZCallRing3Disable(pVCpu);
7433 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
7434 RTThreadPreemptDisable(&PreemptState);
7435
7436 hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
7437 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
7438 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
7439
7440#if HC_ARCH_BITS == 64
7441 /* Restore host-state bits that VT-x only restores partially. */
7442 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
7443 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
7444 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
7445 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
7446#endif
7447
7448 /* Restore the lazy host MSRs as we're leaving VT-x context. */
7449 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
7450 hmR0VmxLazyRestoreHostMsrs(pVCpu);
7451
7452 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
7453 pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
7454 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
7455 if (pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_ACTIVE)
7456 {
7457 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
7458 pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_CLEAR;
7459 }
7460
7461 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
7462 VMMR0ThreadCtxHookDisable(pVCpu);
7463 HMR0LeaveCpu(pVCpu);
7464 RTThreadPreemptRestore(&PreemptState);
7465 return VINF_SUCCESS;
7466 }
7467
7468 Assert(pVCpu);
7469 Assert(pvUser);
7470 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7471 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
7472
7473 VMMRZCallRing3Disable(pVCpu);
7474 Assert(VMMR0IsLogFlushDisabled(pVCpu));
7475
7476 Log4Func((" -> hmR0VmxLongJmpToRing3 enmOperation=%d\n", enmOperation));
7477
7478 int rc = hmR0VmxLongJmpToRing3(pVCpu);
7479 AssertRCReturn(rc, rc);
7480
7481 VMMRZCallRing3Enable(pVCpu);
7482 return VINF_SUCCESS;
7483}
7484
7485
7486/**
7487 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
7488 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
7489 *
7490 * @param pVCpu The cross context virtual CPU structure.
7491 */
7492DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
7493{
7494 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT))
7495 {
7496 if (!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
7497 {
7498 pVCpu->hm.s.vmx.Ctls.u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
7499 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);
7500 AssertRC(rc);
7501 Log4Func(("Setup interrupt-window exiting\n"));
7502 }
7503 } /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. */
7504}
7505
7506
7507/**
7508 * Clears the interrupt-window exiting control in the VMCS.
7509 *
7510 * @param pVCpu The cross context virtual CPU structure.
7511 */
7512DECLINLINE(void) hmR0VmxClearIntWindowExitVmcs(PVMCPU pVCpu)
7513{
7514 Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT);
7515 pVCpu->hm.s.vmx.Ctls.u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
7516 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);
7517 AssertRC(rc);
7518 Log4Func(("Cleared interrupt-window exiting\n"));
7519}
7520
7521
7522/**
7523 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
7524 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
7525 *
7526 * @param pVCpu The cross context virtual CPU structure.
7527 */
7528DECLINLINE(void) hmR0VmxSetNmiWindowExitVmcs(PVMCPU pVCpu)
7529{
7530 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
7531 {
7532 if (!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
7533 {
7534 pVCpu->hm.s.vmx.Ctls.u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
7535 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);
7536 AssertRC(rc);
7537 Log4Func(("Setup NMI-window exiting\n"));
7538 }
7539 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
7540}
7541
7542
7543/**
7544 * Clears the NMI-window exiting control in the VMCS.
7545 *
7546 * @param pVCpu The cross context virtual CPU structure.
7547 */
7548DECLINLINE(void) hmR0VmxClearNmiWindowExitVmcs(PVMCPU pVCpu)
7549{
7550 Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT);
7551 pVCpu->hm.s.vmx.Ctls.u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
7552 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);
7553 AssertRC(rc);
7554 Log4Func(("Cleared NMI-window exiting\n"));
7555}
7556
7557
7558/**
7559 * Evaluates the event to be delivered to the guest and sets it as the pending
7560 * event.
7561 *
7562 * @returns The VT-x guest-interruptibility state.
7563 * @param pVCpu The cross context virtual CPU structure.
7564 */
7565static uint32_t hmR0VmxEvaluatePendingEvent(PVMCPU pVCpu)
7566{
7567 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
7568 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7569 uint32_t const fIntrState = hmR0VmxGetGuestIntrState(pVCpu);
7570 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
7571 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
7572 bool const fBlockNmi = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
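    /* Interruptibility-state bits (Intel spec. 24.4.2 "Guest Non-Register State"): bit 0 = blocking by STI,
       bit 1 = blocking by MOV SS, bit 2 = blocking by SMI, bit 3 = blocking by NMI. */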
7573
7574 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS));
7575 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
7576 Assert(!fBlockSti || pCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
7577 Assert(!TRPMHasTrap(pVCpu));
7578
7579 /*
7580 * Toggling of interrupt force-flags here is safe since we update TRPM on premature exits
7581 * to ring-3 before executing guest code, see hmR0VmxExitToRing3(). We must NOT restore these force-flags.
7582 */
7583 /** @todo SMI. SMIs take priority over NMIs. */
7584 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
7585 {
7586 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
7587 if ( !pVCpu->hm.s.Event.fPending
7588 && !fBlockNmi
7589 && !fBlockSti
7590 && !fBlockMovSS)
7591 {
7592 hmR0VmxSetPendingXcptNmi(pVCpu);
7593 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
7594 Log4Func(("Pending NMI\n"));
7595 }
7596 else
7597 hmR0VmxSetNmiWindowExitVmcs(pVCpu);
7598 }
7599 /*
7600 * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt() returns
7601 * a valid interrupt we -must- deliver the interrupt. We can no longer re-request it from the APIC.
7602 */
7603 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
7604 && !pVCpu->hm.s.fSingleInstruction)
7605 {
7606 Assert(!DBGFIsStepping(pVCpu));
7607 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS);
7608 AssertRCReturn(rc, 0);
7609 bool const fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF);
7610 if ( !pVCpu->hm.s.Event.fPending
7611 && !fBlockInt
7612 && !fBlockSti
7613 && !fBlockMovSS)
7614 {
7615 uint8_t u8Interrupt;
7616 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
7617 if (RT_SUCCESS(rc))
7618 {
7619 hmR0VmxSetPendingExtInt(pVCpu, u8Interrupt);
7620 Log4Func(("Pending external interrupt vector %#x\n", u8Interrupt));
7621 }
7622 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
7623 {
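                /* The TPR threshold is the interrupt's priority class (vector >> 4); a TPR-below-threshold
                   VM-exit should then fire once the guest lowers its TPR far enough for this interrupt to
                   become deliverable, at which point we re-query the APIC. */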
7624 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
7625 hmR0VmxApicSetTprThreshold(pVCpu, u8Interrupt >> 4);
7626 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
7627
7628 /*
7629 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
7630 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
7631 * need to re-set this force-flag here.
7632 */
7633 }
7634 else
7635 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
7636 }
7637 else
7638 hmR0VmxSetIntWindowExitVmcs(pVCpu);
7639 }
7640
7641 return fIntrState;
7642}
7643
7644
7645/**
7646 * Injects any pending events into the guest if the guest is in a state to
7647 * receive them.
7648 *
7649 * @returns Strict VBox status code (i.e. informational status codes too).
7650 * @param pVCpu The cross context virtual CPU structure.
7651 * @param fIntrState The VT-x guest-interruptibility state.
7652 * @param fStepping Running in hmR0VmxRunGuestCodeStep() and we should
7653 * return VINF_EM_DBG_STEPPED if the event was
7654 * dispatched directly.
7655 */
7656static VBOXSTRICTRC hmR0VmxInjectPendingEvent(PVMCPU pVCpu, uint32_t fIntrState, bool fStepping)
7657{
7658 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
7659 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7660
7661 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
7662 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
7663
7664 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
7665 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
7666 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
7667 Assert(!TRPMHasTrap(pVCpu));
7668
7669 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
7670 if (pVCpu->hm.s.Event.fPending)
7671 {
7672 /*
7673 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
7674 * pending even while injecting an event and in this case, we want a VM-exit as soon as
7675 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
7676 *
7677 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
7678 */
7679 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
7680#ifdef VBOX_STRICT
7681 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
7682 {
7683 bool const fBlockInt = !(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
7684 Assert(!fBlockInt);
7685 Assert(!fBlockSti);
7686 Assert(!fBlockMovSS);
7687 }
7688 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
7689 {
7690 bool const fBlockNmi = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
7691 Assert(!fBlockSti);
7692 Assert(!fBlockMovSS);
7693 Assert(!fBlockNmi);
7694 }
7695#endif
7696 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
7697 uIntType));
7698
7699 /*
7700 * Inject the event and get any changes to the guest-interruptibility state.
7701 *
7702 * The guest-interruptibility state may need to be updated if we inject the event
7703 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
7704 */
7705 rcStrict = hmR0VmxInjectEventVmcs(pVCpu, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,
7706 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, fStepping,
7707 &fIntrState);
7708 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
7709
7710 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
7711 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
7712 else
7713 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
7714 }
7715
7716 /*
7717 * Update the guest-interruptibility state.
7718 *
7719 * This is required for the real-on-v86 software interrupt injection case above, as well as
7720 * updates to the guest state from ring-3 or IEM/REM.
7721 */
7722 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7723 AssertRCReturn(rc, rc);
7724
7725 /*
7726 * There's no need to clear the VM-entry interruption-information field here if we're not
7727 * injecting anything. VT-x clears the valid bit on every VM-exit.
7728 *
7729 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7730 */
7731
7732 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
7733 NOREF(fBlockMovSS); NOREF(fBlockSti);
7734 return rcStrict;
7735}
7736
7737
7738/**
7739 * Injects a double-fault (\#DF) exception into the VM.
7740 *
7741 * @returns Strict VBox status code (i.e. informational status codes too).
7742 * @param pVCpu The cross context virtual CPU structure.
7743 * @param fStepping Whether we're running in hmR0VmxRunGuestCodeStep()
7744 * and should return VINF_EM_DBG_STEPPED if the event
7745 * is injected directly (register modified by us, not
7746 * by hardware on VM-entry).
7747 * @param pfIntrState Pointer to the current guest interruptibility-state.
7748 * This interruptibility-state will be updated if
7749 * necessary. This cannot not be NULL.
7750 */
7751DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptDF(PVMCPU pVCpu, bool fStepping, uint32_t *pfIntrState)
7752{
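    /* VM-entry interruption-information layout (Intel spec. 24.8.3): bits 7:0 = vector (here X86_XCPT_DF = 8),
       bits 10:8 = type (3 = hardware exception), bit 11 = deliver error code, bit 31 = valid. The relevant bits
       match the VM-exit interruption-info format, which is why the VMX_EXIT_INT_INFO_XXX constants work here. */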
7753 uint32_t const u32IntInfo = X86_XCPT_DF | VMX_EXIT_INT_INFO_VALID
7754 | (VMX_EXIT_INT_INFO_TYPE_HW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT)
7755 | VMX_EXIT_INT_INFO_ERROR_CODE_VALID;
7756 return hmR0VmxInjectEventVmcs(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */, fStepping,
7757 pfIntrState);
7758}
7759
7760
7761/**
7762 * Injects a general-protection (\#GP) fault into the VM.
7763 *
7764 * @returns Strict VBox status code (i.e. informational status codes too).
7765 * @param pVCpu The cross context virtual CPU structure.
7766 * @param fErrorCodeValid Whether the error code is valid (depends on the CPU
7767 * mode, i.e. in real-mode it's not valid).
7768 * @param u32ErrorCode The error code associated with the \#GP.
7769 * @param fStepping Whether we're running in
7770 * hmR0VmxRunGuestCodeStep() and should return
7771 * VINF_EM_DBG_STEPPED if the event is injected
7772 * directly (register modified by us, not by
7773 * hardware on VM-entry).
7774 * @param pfIntrState Pointer to the current guest interruptibility-state.
7775 * This interruptibility-state will be updated if
7776 * necessary. This cannot be NULL.
7777 */
7778DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptGP(PVMCPU pVCpu, bool fErrorCodeValid, uint32_t u32ErrorCode, bool fStepping,
7779 uint32_t *pfIntrState)
7780{
7781 uint32_t const u32IntInfo = X86_XCPT_GP | VMX_EXIT_INT_INFO_VALID
7782 | (VMX_EXIT_INT_INFO_TYPE_HW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT)
7783 | (fErrorCodeValid ? VMX_EXIT_INT_INFO_ERROR_CODE_VALID : 0);
7784 return hmR0VmxInjectEventVmcs(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */, fStepping,
7785 pfIntrState);
7786}
7787
7788
7789/**
7790 * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
7791 * stack.
7792 *
7793 * @returns Strict VBox status code (i.e. informational status codes too).
7794 * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
7795 * @param pVCpu The cross context virtual CPU structure.
7796 * @param uValue The value to push to the guest stack.
7797 */
7798static VBOXSTRICTRC hmR0VmxRealModeGuestStackPush(PVMCPU pVCpu, uint16_t uValue)
7799{
7800 /*
7801 * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
7802 * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
7803 * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
7804 */
7805 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
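    /* A 2-byte push with SP == 1 would straddle the 64K segment wrap-around boundary; the check below treats
       this as fatal and returns VINF_EM_RESET (cf. the Segment Wraparound reference above). */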
7806 if (pCtx->sp == 1)
7807 return VINF_EM_RESET;
7808 pCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */
7809 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), pCtx->ss.u64Base + pCtx->sp, &uValue, sizeof(uint16_t));
7810 AssertRC(rc);
7811 return rc;
7812}
7813
7814
7815/**
7816 * Injects an event into the guest upon VM-entry by updating the relevant fields
7817 * in the VM-entry area in the VMCS.
7818 *
7819 * @returns Strict VBox status code (i.e. informational status codes too).
7820 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
7821 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
7822 *
7823 * @param pVCpu The cross context virtual CPU structure.
7824 * @param u64IntInfo The VM-entry interruption-information field.
7825 * @param cbInstr The VM-entry instruction length in bytes (for
7826 * software interrupts, exceptions and privileged
7827 * software exceptions).
7828 * @param u32ErrCode The VM-entry exception error code.
7829 * @param GCPtrFaultAddress The page-fault address for \#PF exceptions.
7830 * @param pfIntrState Pointer to the current guest interruptibility-state.
7831 * This interruptibility-state will be updated if
7832 * necessary. This cannot be NULL.
7833 * @param fStepping Whether we're running in
7834 * hmR0VmxRunGuestCodeStep() and should return
7835 * VINF_EM_DBG_STEPPED if the event is injected
7836 * directly (register modified by us, not by
7837 * hardware on VM-entry).
7838 */
7839static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, uint64_t u64IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
7840 RTGCUINTREG GCPtrFaultAddress, bool fStepping, uint32_t *pfIntrState)
7841{
7842 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
7843 AssertMsg(!RT_HI_U32(u64IntInfo), ("%#RX64\n", u64IntInfo));
7844 Assert(pfIntrState);
7845
7846 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7847 uint32_t u32IntInfo = (uint32_t)u64IntInfo;
7848 uint32_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
7849 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
7850
7851#ifdef VBOX_STRICT
7852 /*
7853 * Validate the error-code-valid bit for hardware exceptions.
7854 * No error codes for exceptions in real-mode.
7855 *
7856 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
7857 */
7858 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
7859 && !CPUMIsGuestInRealModeEx(pCtx))
7860 {
7861 switch (uVector)
7862 {
7863 case X86_XCPT_PF:
7864 case X86_XCPT_DF:
7865 case X86_XCPT_TS:
7866 case X86_XCPT_NP:
7867 case X86_XCPT_SS:
7868 case X86_XCPT_GP:
7869 case X86_XCPT_AC:
7870 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
7871 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
7872 RT_FALL_THRU();
7873 default:
7874 break;
7875 }
7876 }
7877#endif
7878
7879 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
7880 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
7881 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7882
7883 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
7884
7885 /*
7886 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
7887 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
7888 * interrupt handler in the (real-mode) guest.
7889 *
7890 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
7891 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
7892 */
7893 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
7894 {
7895 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest)
7896 {
7897 /*
7898 * For unrestricted execution enabled CPUs running real-mode guests, we must not
7899 * set the deliver-error-code bit.
7900 *
7901 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
7902 */
7903 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
7904 }
7905 else
7906 {
7907 PVM pVM = pVCpu->CTX_SUFF(pVM);
7908 Assert(PDMVmmDevHeapIsEnabled(pVM));
7909 Assert(pVM->hm.s.vmx.pRealModeTSS);
7910
7911            /* We require RIP, RSP, RFLAGS, CS and IDTR; import them. */
7912 int rc2 = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK | CPUMCTX_EXTRN_RIP
7913 | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
7914 AssertRCReturn(rc2, rc2);
7915
7916 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
7917 size_t const cbIdtEntry = sizeof(X86IDTR16);
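            /* Each IVT entry is 4 bytes (16-bit offset followed by 16-bit segment); vector N occupies bytes
               [4*N .. 4*N+3]. E.g. a limit of 0x3ff covers all 256 vectors, a limit of 0x1f only vectors 0..7. */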
7918 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
7919 {
7920 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
7921 if (uVector == X86_XCPT_DF)
7922 return VINF_EM_RESET;
7923
7924 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
7925 if (uVector == X86_XCPT_GP)
7926 return hmR0VmxInjectXcptDF(pVCpu, fStepping, pfIntrState);
7927
7928 /*
7929 * If we're injecting an event with no valid IDT entry, inject a #GP.
7930 * No error codes for exceptions in real-mode.
7931 *
7932 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
7933 */
7934 return hmR0VmxInjectXcptGP(pVCpu, false /* fErrCodeValid */, 0 /* u32ErrCode */, fStepping, pfIntrState);
7935 }
7936
7937 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
7938 uint16_t uGuestIp = pCtx->ip;
7939 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
7940 {
7941 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
7942                /* #BP and #OF are both benign traps; we need to resume at the next instruction. */
7943 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
7944 }
7945 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
7946 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
7947
7948 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
7949 X86IDTR16 IdtEntry;
7950 RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
7951 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
7952 AssertRCReturn(rc2, rc2);
7953
7954 /* Construct the stack frame for the interrupt/exception handler. */
7955 VBOXSTRICTRC rcStrict;
7956 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
7957 if (rcStrict == VINF_SUCCESS)
7958 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
7959 if (rcStrict == VINF_SUCCESS)
7960 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
7961
7962 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
7963 if (rcStrict == VINF_SUCCESS)
7964 {
7965 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
7966 pCtx->rip = IdtEntry.offSel;
7967 pCtx->cs.Sel = IdtEntry.uSel;
7968 pCtx->cs.ValidSel = IdtEntry.uSel;
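                /* Real-mode segment base = selector * 16 (selector << 4); cbIdtEntry (4) is reused below as
                   the shift count and happens to equal the required shift. */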
7969 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
7970 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
7971 && uVector == X86_XCPT_PF)
7972 pCtx->cr2 = GCPtrFaultAddress;
7973
7974 /* If any other guest-state bits are changed here, make sure to update
7975 hmR0VmxPreRunGuestCommitted() when thread-context hooks are used. */
7976 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
7977 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
7978 | HM_CHANGED_GUEST_RSP);
7979
7980 /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */
7981 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7982 {
7983 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
7984 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
7985 Log4Func(("Clearing inhibition due to STI\n"));
7986 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7987 }
7988 Log4(("Injecting real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
7989 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
7990
7991                /* The event has been truly dispatched. Mark it as no longer pending so we don't attempt to 'undo'
7992                   it if we return to ring-3 before executing guest code. */
7993 pVCpu->hm.s.Event.fPending = false;
7994
7995 /* Make hmR0VmxPreRunGuest() return if we're stepping since we've changed cs:rip. */
7996 if (fStepping)
7997 rcStrict = VINF_EM_DBG_STEPPED;
7998 }
7999 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
8000 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8001 return rcStrict;
8002 }
8003 }
8004
8005 /* Validate. */
8006 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
8007 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
8008
8009 /* Inject. */
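    /* Note: the VM-entry instruction length is only consumed for software interrupts, privileged software
       exceptions and software exceptions (#BP/#OF); it is ignored for hardware exceptions and external interrupts. */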
8010 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
8011 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
8012 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
8013 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
8014 AssertRCReturn(rc, rc);
8015
8016 /* Update CR2. */
8017 if ( VMX_ENTRY_INT_INFO_TYPE(u32IntInfo) == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
8018 && uVector == X86_XCPT_PF)
8019 pCtx->cr2 = GCPtrFaultAddress;
8020
8021 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
8022
8023 return VINF_SUCCESS;
8024}
8025
8026
8027/**
8028 * Clears the interrupt-window and NMI-window exiting controls in the VMCS if
8029 * they are currently set.
8030 *
8031 * @param   pVCpu       The cross context virtual CPU structure.
8032 *
8033 * @remarks Use this function only when events injected in the VMCS have not yet
8034 *          been delivered to the guest!
8035 * @remarks No-long-jump zone!!!
8037 */
8038static void hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu)
8039{
8040 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
8041 {
8042 hmR0VmxClearIntWindowExitVmcs(pVCpu);
8043 Log4Func(("Cleared interrupt window\n"));
8044 }
8045
8046 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
8047 {
8048 hmR0VmxClearNmiWindowExitVmcs(pVCpu);
8049 Log4Func(("Cleared NMI window\n"));
8050 }
8051}
8052
8053
8054/**
8055 * Enters the VT-x session.
8056 *
8057 * @returns VBox status code.
8058 * @param pVCpu The cross context virtual CPU structure.
8059 */
8060VMMR0DECL(int) VMXR0Enter(PVMCPU pVCpu)
8061{
8062 AssertPtr(pVCpu);
8063 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported);
8064 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8065
8066 LogFlowFunc(("pVCpu=%p\n", pVCpu));
8067 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
8068 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE));
8069
8070#ifdef VBOX_STRICT
8071 /* At least verify VMX is enabled, since we can't check if we're in VMX root mode without #GP'ing. */
8072 RTCCUINTREG uHostCR4 = ASMGetCR4();
8073 if (!(uHostCR4 & X86_CR4_VMXE))
8074 {
8075 LogRelFunc(("X86_CR4_VMXE bit in CR4 is not set!\n"));
8076 return VERR_VMX_X86_CR4_VMXE_CLEARED;
8077 }
8078#endif
8079
8080 /*
8081 * Load the VCPU's VMCS as the current (and active) one.
8082 */
8083 Assert(pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_CLEAR);
8084 int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
8085 if (RT_SUCCESS(rc))
8086 {
8087 pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_ACTIVE;
8088 pVCpu->hm.s.fLeaveDone = false;
8089 Log4Func(("Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
8090
8091 /*
8092 * Do the EMT scheduled L1D flush here if needed.
8093 */
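        /* Writing MSR_IA32_FLUSH_CMD with the L1D bit set flushes the L1 data cache (L1TF mitigation). */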
8094 if (pVCpu->CTX_SUFF(pVM)->hm.s.fL1dFlushOnSched)
8095 ASMWrMsr(MSR_IA32_FLUSH_CMD, MSR_IA32_FLUSH_CMD_F_L1D);
8096 }
8097 return rc;
8098}
8099
8100
8101/**
8102 * The thread-context callback (only on platforms which support it).
8103 *
8104 * @param enmEvent The thread-context event.
8105 * @param pVCpu The cross context virtual CPU structure.
8106 * @param fGlobalInit Whether global VT-x/AMD-V init. was used.
8107 * @thread EMT(pVCpu)
8108 */
8109VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
8110{
8111 NOREF(fGlobalInit);
8112
8113 switch (enmEvent)
8114 {
8115 case RTTHREADCTXEVENT_OUT:
8116 {
8117 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8118 Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
8119 VMCPU_ASSERT_EMT(pVCpu);
8120
8121 /* No longjmps (logger flushes, locks) in this fragile context. */
8122 VMMRZCallRing3Disable(pVCpu);
8123 Log4Func(("Preempting: HostCpuId=%u\n", RTMpCpuId()));
8124
8125 /*
8126 * Restore host-state (FPU, debug etc.)
8127 */
8128 if (!pVCpu->hm.s.fLeaveDone)
8129 {
8130 /*
8131 * Do -not- import the guest-state here as we might already be in the middle of importing
8132 * it, esp. bad if we're holding the PGM lock, see comment in hmR0VmxImportGuestState().
8133 */
8134 hmR0VmxLeave(pVCpu, false /* fImportState */);
8135 pVCpu->hm.s.fLeaveDone = true;
8136 }
8137
8138 /* Leave HM context, takes care of local init (term). */
8139 int rc = HMR0LeaveCpu(pVCpu);
8140 AssertRC(rc); NOREF(rc);
8141
8142 /* Restore longjmp state. */
8143 VMMRZCallRing3Enable(pVCpu);
8144 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreempt);
8145 break;
8146 }
8147
8148 case RTTHREADCTXEVENT_IN:
8149 {
8150 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8151 Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
8152 VMCPU_ASSERT_EMT(pVCpu);
8153
8154 /* No longjmps here, as we don't want to trigger preemption (& its hook) while resuming. */
8155 VMMRZCallRing3Disable(pVCpu);
8156 Log4Func(("Resumed: HostCpuId=%u\n", RTMpCpuId()));
8157
8158 /* Initialize the bare minimum state required for HM. This takes care of
8159 initializing VT-x if necessary (onlined CPUs, local init etc.) */
8160 int rc = hmR0EnterCpu(pVCpu);
8161 AssertRC(rc);
8162 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
8163 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE));
8164
8165 /* Load the active VMCS as the current one. */
8166 if (pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_CLEAR)
8167 {
8168 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
8169 AssertRC(rc); NOREF(rc);
8170 pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_ACTIVE;
8171 Log4Func(("Resumed: Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
8172 }
8173 pVCpu->hm.s.fLeaveDone = false;
8174
8175 /* Do the EMT scheduled L1D flush if needed. */
8176 if (pVCpu->CTX_SUFF(pVM)->hm.s.fL1dFlushOnSched)
8177 ASMWrMsr(MSR_IA32_FLUSH_CMD, MSR_IA32_FLUSH_CMD_F_L1D);
8178
8179 /* Restore longjmp state. */
8180 VMMRZCallRing3Enable(pVCpu);
8181 break;
8182 }
8183
8184 default:
8185 break;
8186 }
8187}
8188
8189
8190/**
8191 * Exports the host state into the VMCS host-state area.
8192 * Sets up the VM-exit MSR-load area.
8193 *
8194 * The CPU state will be loaded from these fields on every successful VM-exit.
8195 *
8196 * @returns VBox status code.
8197 * @param pVCpu The cross context virtual CPU structure.
8198 *
8199 * @remarks No-long-jump zone!!!
8200 */
8201static int hmR0VmxExportHostState(PVMCPU pVCpu)
8202{
8203 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8204
8205 int rc = VINF_SUCCESS;
8206 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT)
8207 {
8208 rc = hmR0VmxExportHostControlRegs();
8209 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8210
8211 rc = hmR0VmxExportHostSegmentRegs(pVCpu);
8212 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8213
8214 rc = hmR0VmxExportHostMsrs(pVCpu);
8215 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8216
8217 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_HOST_CONTEXT;
8218 }
8219 return rc;
8220}
8221
8222
8223/**
8224 * Saves the host state in the VMCS host-state.
8225 *
8226 * @returns VBox status code.
8227 * @param pVCpu The cross context virtual CPU structure.
8228 *
8229 * @remarks No-long-jump zone!!!
8230 */
8231VMMR0DECL(int) VMXR0ExportHostState(PVMCPU pVCpu)
8232{
8233 AssertPtr(pVCpu);
8234 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8235
8236 /*
8237 * Export the host state here while entering HM context.
8238 * When thread-context hooks are used, we might get preempted and have to re-save the host
8239 * state but most of the time we won't be, so do it here before we disable interrupts.
8240 */
8241 return hmR0VmxExportHostState(pVCpu);
8242}
8243
8244
8245/**
8246 * Exports the guest state into the VMCS guest-state area.
8247 *
8248 * This will typically be done before VM-entry when the guest-CPU state and the
8249 * VMCS state may potentially be out of sync.
8250 *
8251 * Sets up the VM-entry MSR-load and VM-exit MSR-store areas. Sets up the
8252 * VM-entry controls.
8253 * Sets up the appropriate VMX non-root function to execute guest code based on
8254 * the guest CPU mode.
8255 *
8256 * @returns VBox strict status code.
8257 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
8258 * without unrestricted guest access and the VMMDev is not presently
8259 * mapped (e.g. EFI32).
8260 *
8261 * @param pVCpu The cross context virtual CPU structure.
8262 *
8263 * @remarks No-long-jump zone!!!
8264 */
8265static VBOXSTRICTRC hmR0VmxExportGuestState(PVMCPU pVCpu)
8266{
8267 AssertPtr(pVCpu);
8268 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
8269
8270 LogFlowFunc(("pVCpu=%p\n", pVCpu));
8271
8272 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x);
8273
8274 /* Determine real-on-v86 mode. */
8275 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
8276 if ( !pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest
8277 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx))
8278 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
8279
8280 /*
8281 * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
8282 * Ideally, assert that the cross-dependent bits are up-to-date at the point of using it.
8283 */
8284 int rc = hmR0VmxSelectVMRunHandler(pVCpu);
8285 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8286
8287 /* This needs to be done after hmR0VmxSelectVMRunHandler() as changing pfnStartVM may require VM-entry control updates. */
8288 rc = hmR0VmxExportGuestEntryCtls(pVCpu);
8289 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8290
8291 /* This needs to be done after hmR0VmxSelectVMRunHandler() as changing pfnStartVM may require VM-exit control updates. */
8292 rc = hmR0VmxExportGuestExitCtls(pVCpu);
8293 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8294
8295 rc = hmR0VmxExportGuestCR0(pVCpu);
8296 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8297
8298 VBOXSTRICTRC rcStrict = hmR0VmxExportGuestCR3AndCR4(pVCpu);
8299 if (rcStrict == VINF_SUCCESS)
8300 { /* likely */ }
8301 else
8302 {
8303 Assert(rcStrict == VINF_EM_RESCHEDULE_REM || RT_FAILURE_NP(rcStrict));
8304 return rcStrict;
8305 }
8306
8307 rc = hmR0VmxExportGuestSegmentRegs(pVCpu);
8308 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8309
8310 /* This needs to be done after hmR0VmxExportGuestEntryCtls() and hmR0VmxExportGuestExitCtls() as it
8311 may alter controls if we determine we don't have to swap EFER after all. */
8312 rc = hmR0VmxExportGuestMsrs(pVCpu);
8313 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8314
8315 rc = hmR0VmxExportGuestApicTpr(pVCpu);
8316 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8317
8318 rc = hmR0VmxExportGuestXcptIntercepts(pVCpu);
8319 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8320
8321 rc = hmR0VmxExportGuestRip(pVCpu);
8322 rc |= hmR0VmxExportGuestRsp(pVCpu);
8323 rc |= hmR0VmxExportGuestRflags(pVCpu);
8324 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8325
8326 /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */
8327 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~( (HM_CHANGED_GUEST_GPRS_MASK & ~HM_CHANGED_GUEST_RSP)
8328 | HM_CHANGED_GUEST_CR2
8329 | (HM_CHANGED_GUEST_DR_MASK & ~HM_CHANGED_GUEST_DR7)
8330 | HM_CHANGED_GUEST_X87
8331 | HM_CHANGED_GUEST_SSE_AVX
8332 | HM_CHANGED_GUEST_OTHER_XSAVE
8333 | HM_CHANGED_GUEST_XCRx
8334 | HM_CHANGED_GUEST_KERNEL_GS_BASE /* Part of lazy or auto load-store MSRs. */
8335 | HM_CHANGED_GUEST_SYSCALL_MSRS /* Part of lazy or auto load-store MSRs. */
8336 | HM_CHANGED_GUEST_TSC_AUX
8337 | HM_CHANGED_GUEST_OTHER_MSRS
8338 | HM_CHANGED_GUEST_HWVIRT
8339 | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_VMX_MASK)));
8340
8341 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExportGuestState, x);
8342 return rc;
8343}
8344
8345
8346/**
8347 * Exports the state shared between the host and guest into the VMCS.
8348 *
8349 * @param pVCpu The cross context virtual CPU structure.
8350 *
8351 * @remarks No-long-jump zone!!!
8352 */
8353static void hmR0VmxExportSharedState(PVMCPU pVCpu)
8354{
8355 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8356 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8357
8358 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DR_MASK)
8359 {
8360 int rc = hmR0VmxExportSharedDebugState(pVCpu);
8361 AssertRC(rc);
8362 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK;
8363
8364 /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
8365 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_RFLAGS)
8366 {
8367 rc = hmR0VmxExportGuestRflags(pVCpu);
8368 AssertRC(rc);
8369 }
8370 }
8371
8372 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_GUEST_LAZY_MSRS)
8373 {
8374 hmR0VmxLazyLoadGuestMsrs(pVCpu);
8375 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_VMX_GUEST_LAZY_MSRS;
8376 }
8377
8378 AssertMsg(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE),
8379 ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
8380}
8381
8382
8383/**
8384 * Worker for loading the guest-state bits in the inner VT-x execution loop.
8385 *
8386 * @returns Strict VBox status code (i.e. informational status codes too).
8387 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
8388 * without unrestricted guest access and the VMMDev is not presently
8389 * mapped (e.g. EFI32).
8390 *
8391 * @param pVCpu The cross context virtual CPU structure.
8392 *
8393 * @remarks No-long-jump zone!!!
8394 */
8395static VBOXSTRICTRC hmR0VmxExportGuestStateOptimal(PVMCPU pVCpu)
8396{
8397 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
8398 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8399 Assert(VMMR0IsLogFlushDisabled(pVCpu));
8400
8401#ifdef HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
8402 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
8403#endif
8404
8405 /*
8406     * For many exits it's only RIP that changes, so we try to export it first
8407 * without going through a lot of change flag checks.
8408 */
8409 VBOXSTRICTRC rcStrict;
8410 uint64_t fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
8411 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
8412 if ((fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) == HM_CHANGED_GUEST_RIP)
8413 {
8414 rcStrict = hmR0VmxExportGuestRip(pVCpu);
8415 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8416 { /* likely */}
8417 else
8418 AssertMsgFailedReturn(("hmR0VmxExportGuestRip failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
8419 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportMinimal);
8420 }
8421 else if (fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
8422 {
8423 rcStrict = hmR0VmxExportGuestState(pVCpu);
8424 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8425 { /* likely */}
8426 else
8427 {
8428 AssertMsg(rcStrict == VINF_EM_RESCHEDULE_REM, ("hmR0VmxExportGuestState failed! rc=%Rrc\n",
8429 VBOXSTRICTRC_VAL(rcStrict)));
8430 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8431 return rcStrict;
8432 }
8433 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull);
8434 }
8435 else
8436 rcStrict = VINF_SUCCESS;
8437
8438#ifdef VBOX_STRICT
8439 /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */
8440 fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
8441 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
8442 AssertMsg(!(fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)),
8443 ("fCtxChanged=%#RX64\n", fCtxChanged));
8444#endif
8445 return rcStrict;
8446}
8447
8448
8449/**
8450 * Sets up the APIC-access page for virtualizing APIC accesses.
8451 *
8452 * This can cause a longjmp to ring-3 due to the acquisition of the PGM lock; hence
8453 * it is not done as part of exporting the guest state, see @bugref{8721}.
8454 *
8455 * @returns VBox status code.
8456 * @param pVCpu The cross context virtual CPU structure.
8457 */
8458static int hmR0VmxMapHCApicAccessPage(PVMCPU pVCpu)
8459{
8460 PVM pVM = pVCpu->CTX_SUFF(pVM);
8461 uint64_t const u64MsrApicBase = APICGetBaseMsrNoCheck(pVCpu);
8462
8463 Assert(PDMHasApic(pVM));
8464 Assert(u64MsrApicBase);
8465
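    /* The APIC-base MSR keeps the 4K-aligned physical base address in bits 12 and up; mask off the lower
       flag bits (BSP, x2APIC enable, APIC global enable) to get the MMIO base. */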
8466 RTGCPHYS const GCPhysApicBase = u64MsrApicBase & PAGE_BASE_GC_MASK;
8467 Log4Func(("Mappping HC APIC-access page at %#RGp\n", GCPhysApicBase));
8468
8469 /* Unalias any existing mapping. */
8470 int rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
8471 AssertRCReturn(rc, rc);
8472
8473 /* Map the HC APIC-access page in place of the MMIO page, also updates the shadow page tables if necessary. */
8474 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
8475 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
8476 AssertRCReturn(rc, rc);
8477
8478 /* Update the per-VCPU cache of the APIC base MSR. */
8479 pVCpu->hm.s.vmx.u64MsrApicBase = u64MsrApicBase;
8480 return VINF_SUCCESS;
8481}
8482
8483
8484/**
8485 * Does the preparations before executing guest code in VT-x.
8486 *
8487 * This may cause longjmps to ring-3 and may even result in rescheduling to the
8488 * recompiler/IEM. We must be cautious about what we do here regarding committing
8489 * guest-state information into the VMCS under the assumption that we will assuredly
8490 * execute the guest in VT-x mode.
8491 *
8492 * If we fall back to the recompiler/IEM after updating the VMCS and clearing
8493 * the common-state (TRPM/forceflags), we must undo those changes so that the
8494 * recompiler/IEM can (and should) use them when it resumes guest execution.
8495 * Otherwise such operations must be done when we can no longer exit to ring-3.
8496 *
8497 * @returns Strict VBox status code (i.e. informational status codes too).
8498 * @retval VINF_SUCCESS if we can proceed with running the guest, interrupts
8499 * have been disabled.
8500 * @retval VINF_EM_RESET if a triple-fault occurs while injecting a
8501 * double-fault into the guest.
8502 * @retval VINF_EM_DBG_STEPPED if @a fStepping is true and an event was
8503 * dispatched directly.
8504 * @retval VINF_* scheduling changes, we have to go back to ring-3.
8505 *
8506 * @param pVCpu The cross context virtual CPU structure.
8507 * @param pVmxTransient Pointer to the VMX transient structure.
8508 * @param fStepping Set if called from hmR0VmxRunGuestCodeStep(). Makes
8509 * us ignore some of the reasons for returning to
8510 * ring-3, and return VINF_EM_DBG_STEPPED if event
8511 * dispatching took place.
8512 */
8513static VBOXSTRICTRC hmR0VmxPreRunGuest(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, bool fStepping)
8514{
8515 Assert(VMMRZCallRing3IsEnabled(pVCpu));
8516
8517#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
8518 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
8519 {
8520 Log2(("hmR0VmxPreRunGuest: Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
8521 RT_NOREF3(pVCpu, pVmxTransient, fStepping);
8522 return VINF_EM_RESCHEDULE_REM;
8523 }
8524#endif
8525
8526#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
8527 PGMRZDynMapFlushAutoSet(pVCpu);
8528#endif
8529
8530 /*
8531 * Check and process force flag actions, some of which might require us to go back to ring-3.
8532 */
8533 VBOXSTRICTRC rcStrict = hmR0VmxCheckForceFlags(pVCpu, fStepping);
8534 if (rcStrict == VINF_SUCCESS)
8535    { /* FFs don't get set all the time. */ }
8536 else
8537 return rcStrict;
8538
8539 /*
8540 * Virtualize memory-mapped accesses to the physical APIC (may take locks).
8541 */
8542 PVM pVM = pVCpu->CTX_SUFF(pVM);
8543 if ( !pVCpu->hm.s.vmx.u64MsrApicBase
8544 && (pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
8545 && PDMHasApic(pVM))
8546 {
8547 int rc = hmR0VmxMapHCApicAccessPage(pVCpu);
8548 AssertRCReturn(rc, rc);
8549 }
8550
8551 if (TRPMHasTrap(pVCpu))
8552 hmR0VmxTrpmTrapToPendingEvent(pVCpu);
8553 uint32_t fIntrState = hmR0VmxEvaluatePendingEvent(pVCpu);
8554
8555 /*
8556 * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus
8557 * needs to be done with longjmps or interrupts + preemption enabled. Event injection might
8558 * also result in triple-faulting the VM.
8559 */
8560 rcStrict = hmR0VmxInjectPendingEvent(pVCpu, fIntrState, fStepping);
8561 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8562 { /* likely */ }
8563 else
8564 {
8565 AssertMsg(rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
8566 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8567 return rcStrict;
8568 }
8569
8570 /*
8571     * A longjmp might result in importing CR3 even for VM-exits that don't necessarily
8572     * import CR3 themselves. We will need to update them here, since even the
8573     * hmR0VmxInjectPendingEvent() call above may lazily import guest-CPU state on demand,
8574     * causing the force-flags below to be set.
8575 */
8576 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
8577 {
8578 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
8579 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
8580 AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
8581 ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
8582 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
8583 }
8584 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
8585 {
8586 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
8587 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
8588 }
8589
8590 /*
8591 * No longjmps to ring-3 from this point on!!!
8592 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
8593 * This also disables flushing of the R0-logger instance (if any).
8594 */
8595 VMMRZCallRing3Disable(pVCpu);
8596
8597 /*
8598 * Export the guest state bits.
8599 *
8600 * We cannot perform longjmps while loading the guest state because we do not preserve the
8601 * host/guest state (although the VMCS will be preserved) across longjmps which can cause
8602 * CPU migration.
8603 *
8604 * If we are injecting events to a real-on-v86 mode guest, we will have to update
8605 * RIP and some segment registers, i.e. hmR0VmxInjectPendingEvent()->hmR0VmxInjectEventVmcs().
8606 * Hence, loading of the guest state needs to be done -after- injection of events.
8607 */
8608 rcStrict = hmR0VmxExportGuestStateOptimal(pVCpu);
8609 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8610 { /* likely */ }
8611 else
8612 {
8613 VMMRZCallRing3Enable(pVCpu);
8614 return rcStrict;
8615 }
8616
8617 /*
8618 * We disable interrupts so that we don't miss any interrupts that would flag preemption
8619 * (IPI/timers etc.) when thread-context hooks aren't used and we've been running with
8620     * preemption disabled for a while. Since this is purely to aid the
8621     * RTThreadPreemptIsPending() code, it doesn't matter that it may temporarily re-enable and
8622     * disable interrupts on NT.
8623 *
8624     * We need to check for force-flags that could've possibly been altered since we last
8625 * checked them (e.g. by PDMGetInterrupt() leaving the PDM critical section,
8626 * see @bugref{6398}).
8627 *
8628 * We also check a couple of other force-flags as a last opportunity to get the EMT back
8629 * to ring-3 before executing guest code.
8630 */
8631 pVmxTransient->fEFlags = ASMIntDisableFlags();
8632
8633 if ( ( !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
8634 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
8635 || ( fStepping /* Optimized for the non-stepping case, so a bit of unnecessary work when stepping. */
8636 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK & ~(VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT))) )
8637 {
8638 if (!RTThreadPreemptIsPending(NIL_RTTHREAD))
8639 {
8640 pVCpu->hm.s.Event.fPending = false;
8641
8642 /*
8643 * We've injected any pending events. This is really the point of no return (to ring-3).
8644 *
8645 * Note! The caller expects to continue with interrupts & longjmps disabled on successful
8646 * returns from this function, so don't enable them here.
8647 */
8648 return VINF_SUCCESS;
8649 }
8650
8651 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPendingHostIrq);
8652 rcStrict = VINF_EM_RAW_INTERRUPT;
8653 }
8654 else
8655 {
8656 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
8657 rcStrict = VINF_EM_RAW_TO_R3;
8658 }
8659
8660 ASMSetFlags(pVmxTransient->fEFlags);
8661 VMMRZCallRing3Enable(pVCpu);
8662
8663 return rcStrict;
8664}
8665
8666
8667/**
8668 * Prepares to run guest code in VT-x and we've committed to doing so. This
8669 * means there is no backing out to ring-3 or anywhere else at this
8670 * point.
8671 *
8672 * @param pVCpu The cross context virtual CPU structure.
8673 * @param pVmxTransient Pointer to the VMX transient structure.
8674 *
8675 * @remarks Called with preemption disabled.
8676 * @remarks No-long-jump zone!!!
8677 */
8678static void hmR0VmxPreRunGuestCommitted(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
8679{
8680 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8681 Assert(VMMR0IsLogFlushDisabled(pVCpu));
8682 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8683
8684 /*
8685 * Indicate start of guest execution and where poking EMT out of guest-context is recognized.
8686 */
8687 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
8688 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
8689
8690 PVM pVM = pVCpu->CTX_SUFF(pVM);
8691 if (!CPUMIsGuestFPUStateActive(pVCpu))
8692 {
8693 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x);
8694 if (CPUMR0LoadGuestFPU(pVM, pVCpu) == VINF_CPUM_HOST_CR0_MODIFIED)
8695 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT;
8696 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestFpuState, x);
8697 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadGuestFpu);
8698 }
8699
8700 /*
8701     * Lazy-update of the host MSR values in the auto-load/store MSR area.
8702 */
8703 if ( !pVCpu->hm.s.vmx.fUpdatedHostMsrs
8704 && pVCpu->hm.s.vmx.cMsrs > 0)
8705 hmR0VmxUpdateAutoLoadStoreHostMsrs(pVCpu);
8706
8707 /*
8708 * Re-save the host state bits as we may've been preempted (only happens when
8709     * thread-context hooks are used or when hmR0VmxSelectVMRunHandler() changes pfnStartVM).
8710 * Note that the 64-on-32 switcher saves the (64-bit) host state into the VMCS and
8711 * if we change the switcher back to 32-bit, we *must* save the 32-bit host state here.
8712 * See @bugref{8432}.
8713 */
8714 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT)
8715 {
8716 int rc = hmR0VmxExportHostState(pVCpu);
8717 AssertRC(rc);
8718 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreemptExportHostState);
8719 }
8720 Assert(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT));
8721
8722 /*
8723 * Export the state shared between host and guest (FPU, debug, lazy MSRs).
8724 */
8725 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)
8726 hmR0VmxExportSharedState(pVCpu);
8727 AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
8728
8729 /* Store status of the shared guest-host state at the time of VM-entry. */
8730#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
8731 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
8732 {
8733 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
8734 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
8735 }
8736 else
8737#endif
8738 {
8739 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
8740 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
8741 }
8742
8743 /*
8744 * Cache the TPR-shadow for checking on every VM-exit if it might have changed.
8745 */
8746 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
8747 pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR];
8748
8749 PHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();
8750 RTCPUID idCurrentCpu = pHostCpu->idCpu;
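    /* Re-compute the TSC offset and VMX-preemption timer when requested or when we have migrated to a
       different host CPU (host TSCs are not necessarily synchronized across CPUs). */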
8751 if ( pVmxTransient->fUpdateTscOffsettingAndPreemptTimer
8752 || idCurrentCpu != pVCpu->hm.s.idLastCpu)
8753 {
8754 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu);
8755 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
8756 }
8757
8758 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB flushing, set this across the world switch. */
8759 hmR0VmxFlushTaggedTlb(pHostCpu, pVCpu); /* Invalidate the appropriate guest entries from the TLB. */
8760 Assert(idCurrentCpu == pVCpu->hm.s.idLastCpu);
8761 pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu; /* Update the error reporting info. with the current host CPU. */
8762
8763 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
8764
8765 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
8766 to start executing. */
8767
8768 /*
8769 * Load the TSC_AUX MSR when we are not intercepting RDTSCP.
8770 */
8771 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 & VMX_PROC_CTLS2_RDTSCP)
8772 {
8773 if (!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT))
8774 {
8775 bool fMsrUpdated;
8776 hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_TSC_AUX);
8777 int rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMGetGuestTscAux(pVCpu), true /* fUpdateHostMsr */,
8778 &fMsrUpdated);
8779 AssertRC(rc2);
8780 Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8781 /* Finally, mark that all host MSR values are updated so we don't redo it without leaving VT-x. See @bugref{6956}. */
8782 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
8783 }
8784 else
8785 {
8786 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX);
8787 Assert(!pVCpu->hm.s.vmx.cMsrs || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8788 }
8789 }
8790
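    /* Similarly, when IBRS is exposed to the guest, swap MSR_IA32_SPEC_CTRL through the
       auto-load/store MSR area so the guest's value is in effect while guest code runs. */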
8791 if (pVM->cpum.ro.GuestFeatures.fIbrs)
8792 {
8793 bool fMsrUpdated;
8794 hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_OTHER_MSRS);
8795 int rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_IA32_SPEC_CTRL, CPUMGetGuestSpecCtrl(pVCpu), true /* fUpdateHostMsr */,
8796 &fMsrUpdated);
8797 AssertRC(rc2);
8798 Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8799 /* Finally, mark that all host MSR values are updated so we don't redo it without leaving VT-x. See @bugref{6956}. */
8800 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
8801 }
8802
8803#ifdef VBOX_STRICT
8804 hmR0VmxCheckAutoLoadStoreMsrs(pVCpu);
8805 hmR0VmxCheckHostEferMsr(pVCpu);
8806 AssertRC(hmR0VmxCheckVmcsCtls(pVCpu));
8807#endif
8808#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
8809 if (!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
8810 {
8811 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu);
8812 if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
8813 Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
8814 }
8815#endif
8816}
8817
8818
8819/**
8820 * Performs some essential restoration of state after running guest code in
8821 * VT-x.
8822 *
8823 * @param pVCpu The cross context virtual CPU structure.
8824 * @param pVmxTransient Pointer to the VMX transient structure.
8825 * @param rcVMRun Return code of VMLAUNCH/VMRESUME.
8826 *
8827 * @remarks Called with interrupts disabled, and returns with interrupts enabled!
8828 *
8829 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
8830 * unconditionally when it is safe to do so.
8831 */
8832static void hmR0VmxPostRunGuest(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, int rcVMRun)
8833{
8834 uint64_t const uHostTsc = ASMReadTSC();
8835 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8836
8837 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
8838 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for EMT poking. */
8839 pVCpu->hm.s.fCtxChanged = 0; /* Exits/longjmps to ring-3 require saving the guest state. */
8840 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */
8841 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
8842 pVmxTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */
8843
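    /* Without RDTSC intercepts the guest observes the host TSC plus the VMCS TSC offset,
       so record that value as the last TSC tick the guest could have seen. */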
8844 if (!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT))
8845 TMCpuTickSetLastSeen(pVCpu, uHostTsc + pVCpu->hm.s.vmx.Ctls.u64TscOffset);
8846
8847 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatPreExit, x);
8848 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
8849 Assert(!ASMIntAreEnabled());
8850 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
8851
8852#if HC_ARCH_BITS == 64
8853 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_REQUIRED; /* Host state messed up by VT-x, we must restore. */
8854#endif
8855#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
8856 /* The 64-on-32 switcher maintains fVmcsState on its own and we need to leave it alone here. */
8857 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64)
8858 pVCpu->hm.s.vmx.fVmcsState |= HMVMX_VMCS_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */
8859#else
8860 pVCpu->hm.s.vmx.fVmcsState |= HMVMX_VMCS_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */
8861#endif
8862#ifdef VBOX_STRICT
8863 hmR0VmxCheckHostEferMsr(pVCpu); /* Verify that VMRUN/VMLAUNCH didn't modify host EFER. */
8864#endif
8865 ASMSetFlags(pVmxTransient->fEFlags); /* Enable interrupts. */
8866
8867 /* Save the basic VM-exit reason. Refer Intel spec. 24.9.1 "Basic VM-exit Information". */
8868 uint32_t uExitReason;
8869 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
8870 rc |= hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
8871 AssertRC(rc);
8872 pVmxTransient->uExitReason = VMX_EXIT_REASON_BASIC(uExitReason);
8873 pVmxTransient->fVMEntryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason);
8874
8875 if (rcVMRun == VINF_SUCCESS)
8876 {
8877 /*
8878 * Update the VM-exit history array here even if the VM-entry failed due to:
8879 * - Invalid guest state.
8880 * - MSR loading.
8881 * - Machine-check event.
8882 *
8883 * In any of the above cases we will still have a "valid" VM-exit reason
8884 * even though @a fVMEntryFailed will be true.
8885 *
8886 * See Intel spec. 26.7 "VM-Entry failures during or after loading guest state".
8887 *
8888 * Note! We don't have CS or RIP at this point. Will probably address that later
8889 * by amending the history entry added here.
8890 */
8891 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_VMX, pVmxTransient->uExitReason & EMEXIT_F_TYPE_MASK),
8892 UINT64_MAX, uHostTsc);
8893
8894 if (!pVmxTransient->fVMEntryFailed)
8895 {
8896 VMMRZCallRing3Enable(pVCpu);
8897
8898 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
8899 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
8900
8901#if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
8902 rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
8903 AssertRC(rc);
8904#elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
8905 rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_RFLAGS);
8906 AssertRC(rc);
8907#else
8908 /*
8909 * Import the guest-interruptibility state always as we need it while evaluating
8910 * injecting events on re-entry.
8911 *
8912 * We don't import CR0 (when Unrestricted guest execution is unavailable) despite
8913 * checking for real-mode while exporting the state because all bits that cause
8914 * mode changes wrt CR0 are intercepted.
8915 */
8916 rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_HM_VMX_INT_STATE);
8917 AssertRC(rc);
8918#endif
8919
8920 /*
8921 * Sync the TPR shadow with our APIC state.
8922 */
8923 if ( (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
8924 && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR])
8925 {
8926 rc = APICSetTpr(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR]);
8927 AssertRC(rc);
8928 ASMAtomicOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
8929 }
8930
8931 Assert(VMMRZCallRing3IsEnabled(pVCpu));
8932 return;
8933 }
8934 }
8935 else
8936 Log4Func(("VM-entry failure: rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", rcVMRun, pVmxTransient->fVMEntryFailed));
8937
8938 VMMRZCallRing3Enable(pVCpu);
8939}
8940
8941
8942/**
8943 * Runs the guest code using VT-x the normal way.
8944 *
8945 * @returns VBox status code.
8946 * @param pVCpu The cross context virtual CPU structure.
8947 *
8948 * @note Mostly the same as hmR0VmxRunGuestCodeStep().
8949 */
8950static VBOXSTRICTRC hmR0VmxRunGuestCodeNormal(PVMCPU pVCpu)
8951{
8952 VMXTRANSIENT VmxTransient;
8953 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
8954 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
8955 uint32_t cLoops = 0;
8956
8957 for (;; cLoops++)
8958 {
8959 Assert(!HMR0SuspendPending());
8960 HMVMX_ASSERT_CPU_SAFE(pVCpu);
8961
8962 /* Preparatory work for running guest code, this may force us to return
8963 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
8964 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
8965 rcStrict = hmR0VmxPreRunGuest(pVCpu, &VmxTransient, false /* fStepping */);
8966 if (rcStrict != VINF_SUCCESS)
8967 break;
8968
8969 hmR0VmxPreRunGuestCommitted(pVCpu, &VmxTransient);
8970 int rcRun = hmR0VmxRunGuest(pVCpu);
8971
8972 /* Restore any residual host-state and save any bits shared between host
8973 and guest into the guest-CPU state. Re-enables interrupts! */
8974 hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun);
8975
8976 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
8977 if (RT_SUCCESS(rcRun))
8978 { /* very likely */ }
8979 else
8980 {
8981 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
8982 hmR0VmxReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
8983 return rcRun;
8984 }
8985
8986 /* Profile the VM-exit. */
8987 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
8988 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
8989 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
8990 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
8991 HMVMX_START_EXIT_DISPATCH_PROF();
8992
8993 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason);
8994
8995 /* Handle the VM-exit. */
8996#ifdef HMVMX_USE_FUNCTION_TABLE
8997 rcStrict = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, &VmxTransient);
8998#else
8999 rcStrict = hmR0VmxHandleExit(pVCpu, &VmxTransient, VmxTransient.uExitReason);
9000#endif
9001 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
9002 if (rcStrict == VINF_SUCCESS)
9003 {
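            /* Stay in the inner loop while under the resume-loop limit; once exceeded, return
               VINF_EM_RAW_INTERRUPT so we periodically drop back to ring-3. */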
9004 if (cLoops <= pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops)
9005 continue; /* likely */
9006 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
9007 rcStrict = VINF_EM_RAW_INTERRUPT;
9008 }
9009 break;
9010 }
9011
9012 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
9013 return rcStrict;
9014}
9015
9016#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9017/**
9018 * Runs the nested-guest code using VT-x the normal way.
9019 *
9020 * @returns VBox status code.
9021 * @param pVCpu The cross context virtual CPU structure.
9022 * @sa hmR0VmxRunGuestCodeNormal.
9023 */
9024static VBOXSTRICTRC hmR0VmxRunGuestCodeNested(PVMCPU pVCpu)
9025{
9026 RT_NOREF(pVCpu);
9027 return VERR_NOT_IMPLEMENTED;
9028}
9029#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9030
9031
9032/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
9033 * probes.
9034 *
9035 * The following few functions and associated structure contain the bloat
9036 * necessary for providing detailed debug events and dtrace probes as well as
9037 * reliable host side single stepping. This works on the principle of
9038 * "subclassing" the normal execution loop and workers. We replace the loop
9039 * method completely and override selected helpers to add necessary adjustments
9040 * to their core operation.
9041 *
9042 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
9043 * any performance for debug and analysis features.
9044 *
9045 * @{
9046 */
9047
9048/**
9049 * Transient per-VCPU debug state of the VMCS and related info that we save/restore
9050 * in the debug run loop.
9051 */
9052typedef struct VMXRUNDBGSTATE
9053{
9054 /** The RIP we started executing at. This is for detecting that we stepped. */
9055 uint64_t uRipStart;
9056 /** The CS we started executing with. */
9057 uint16_t uCsStart;
9058
9059 /** Whether we've actually modified the 1st execution control field. */
9060 bool fModifiedProcCtls : 1;
9061 /** Whether we've actually modified the 2nd execution control field. */
9062 bool fModifiedProcCtls2 : 1;
9063 /** Whether we've actually modified the exception bitmap. */
9064 bool fModifiedXcptBitmap : 1;
9065
9066 /** Whether we want the CR0 guest/host mask to be cleared. */
9067 bool fClearCr0Mask : 1;
9068 /** Whether we want the CR4 guest/host mask to be cleared. */
9069 bool fClearCr4Mask : 1;
9070 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
9071 uint32_t fCpe1Extra;
9072 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
9073 uint32_t fCpe1Unwanted;
9074 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
9075 uint32_t fCpe2Extra;
9076 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
9077 uint32_t bmXcptExtra;
9078 /** The sequence number of the Dtrace provider settings the state was
9079 * configured against. */
9080 uint32_t uDtraceSettingsSeqNo;
9081 /** VM-exits to check (one bit per VM-exit). */
9082 uint32_t bmExitsToCheck[3];
9083
9084 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
9085 uint32_t fProcCtlsInitial;
9086 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
9087 uint32_t fProcCtls2Initial;
9088 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
9089 uint32_t bmXcptInitial;
9090} VMXRUNDBGSTATE;
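/* The exits-to-check bitmap holds one bit per VM-exit reason (0..VMX_EXIT_MAX), rounded up
   to whole 32-bit words: (VMX_EXIT_MAX + 1 + 31) / 32 dwords of 4 bytes each. */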
9091AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
9092typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
9093
9094
9095/**
9096 * Initializes the VMXRUNDBGSTATE structure.
9097 *
9098 * @param pVCpu The cross context virtual CPU structure of the
9099 * calling EMT.
9100 * @param pDbgState The structure to initialize.
9101 */
9102static void hmR0VmxRunDebugStateInit(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState)
9103{
9104 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
9105 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
9106
9107 pDbgState->fModifiedProcCtls = false;
9108 pDbgState->fModifiedProcCtls2 = false;
9109 pDbgState->fModifiedXcptBitmap = false;
9110 pDbgState->fClearCr0Mask = false;
9111 pDbgState->fClearCr4Mask = false;
9112 pDbgState->fCpe1Extra = 0;
9113 pDbgState->fCpe1Unwanted = 0;
9114 pDbgState->fCpe2Extra = 0;
9115 pDbgState->bmXcptExtra = 0;
9116 pDbgState->fProcCtlsInitial = pVCpu->hm.s.vmx.Ctls.u32ProcCtls;
9117 pDbgState->fProcCtls2Initial = pVCpu->hm.s.vmx.Ctls.u32ProcCtls2;
9118 pDbgState->bmXcptInitial = pVCpu->hm.s.vmx.Ctls.u32XcptBitmap;
9119}
9120
9121
9122/**
9123 * Updates the VMCS fields with changes requested by @a pDbgState.
9124 *
9125 * This is performed after hmR0VmxPreRunGuestDebugStateUpdate as well as
9126 * immediately before executing guest code, i.e. when interrupts are disabled.
9127 * We don't check status codes here as we cannot easily assert or return in the
9128 * latter case.
9129 *
9130 * @param pVCpu The cross context virtual CPU structure.
9131 * @param pDbgState The debug state.
9132 */
9133static void hmR0VmxPreRunGuestDebugStateApply(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState)
9134{
9135 /*
9136 * Ensure desired flags in VMCS control fields are set.
9137 * (Ignoring write failure here, as we're committed and it's just debug extras.)
9138 *
9139 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
9140 * there should be no stale data in pCtx at this point.
9141 */
9142 if ( (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
9143 || (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & pDbgState->fCpe1Unwanted))
9144 {
9145 pVCpu->hm.s.vmx.Ctls.u32ProcCtls |= pDbgState->fCpe1Extra;
9146 pVCpu->hm.s.vmx.Ctls.u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
9147 VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);
9148 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVCpu->hm.s.vmx.Ctls.u32ProcCtls));
9149 pDbgState->fModifiedProcCtls = true;
9150 }
9151
9152 if ((pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
9153 {
9154 pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 |= pDbgState->fCpe2Extra;
9155 VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pVCpu->hm.s.vmx.Ctls.u32ProcCtls2);
9156 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVCpu->hm.s.vmx.Ctls.u32ProcCtls2));
9157 pDbgState->fModifiedProcCtls2 = true;
9158 }
9159
9160 if ((pVCpu->hm.s.vmx.Ctls.u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
9161 {
9162 pVCpu->hm.s.vmx.Ctls.u32XcptBitmap |= pDbgState->bmXcptExtra;
9163 VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.Ctls.u32XcptBitmap);
9164 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVCpu->hm.s.vmx.Ctls.u32XcptBitmap));
9165 pDbgState->fModifiedXcptBitmap = true;
9166 }
9167
9168 if (pDbgState->fClearCr0Mask && pVCpu->hm.s.vmx.Ctls.u32Cr0Mask != 0)
9169 {
9170 pVCpu->hm.s.vmx.Ctls.u32Cr0Mask = 0;
9171 VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, 0);
9172 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
9173 }
9174
9175 if (pDbgState->fClearCr4Mask && pVCpu->hm.s.vmx.Ctls.u32Cr4Mask != 0)
9176 {
9177 pVCpu->hm.s.vmx.Ctls.u32Cr4Mask = 0;
9178 VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, 0);
9179 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
9180 }
9181}
9182
9183
9184/**
9185 * Restores VMCS fields that were changed by hmR0VmxPreRunGuestDebugStateApply for
9186 * re-entry next time around.
9187 *
9188 * @returns Strict VBox status code (i.e. informational status codes too).
9189 * @param pVCpu The cross context virtual CPU structure.
9190 * @param pDbgState The debug state.
9191 * @param rcStrict The return code from executing the guest using single
9192 * stepping.
9193 */
9194static VBOXSTRICTRC hmR0VmxRunDebugStateRevert(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState, VBOXSTRICTRC rcStrict)
9195{
9196 /*
9197 * Restore VM-exit control settings as we may not reenter this function the
9198 * next time around.
9199 */
9200 /* We reload the initial value and trigger whatever recalculations we can the
9201 next time around. From the looks of things, that's all that's required atm. */
9202 if (pDbgState->fModifiedProcCtls)
9203 {
9204 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
9205 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
9206 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
9207 AssertRCReturn(rc2, rc2);
9208 pVCpu->hm.s.vmx.Ctls.u32ProcCtls = pDbgState->fProcCtlsInitial;
9209 }
9210
9211 /* We're currently the only ones messing with this one, so just restore the
9212 cached value and reload the field. */
9213 if ( pDbgState->fModifiedProcCtls2
9214 && pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 != pDbgState->fProcCtls2Initial)
9215 {
9216 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
9217 AssertRCReturn(rc2, rc2);
9218 pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 = pDbgState->fProcCtls2Initial;
9219 }
9220
9221 /* If we've modified the exception bitmap, we restore it and trigger
9222 reloading and partial recalculation the next time around. */
9223 if (pDbgState->fModifiedXcptBitmap)
9224 pVCpu->hm.s.vmx.Ctls.u32XcptBitmap = pDbgState->bmXcptInitial;
9225
9226 return rcStrict;
9227}
9228
9229
9230/**
9231 * Configures VM-exit controls for current DBGF and DTrace settings.
9232 *
9233 * This updates @a pDbgState and the VMCS execution control fields to reflect
9234 * the necessary VM-exits demanded by DBGF and DTrace.
9235 *
9236 * @param pVCpu The cross context virtual CPU structure.
9237 * @param pDbgState The debug state.
9238 * @param pVmxTransient Pointer to the VMX transient structure. May update
9239 * fUpdateTscOffsettingAndPreemptTimer.
9240 */
9241static void hmR0VmxPreRunGuestDebugStateUpdate(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState, PVMXTRANSIENT pVmxTransient)
9242{
9243 /*
9244 * Take down the dtrace serial number so we can spot changes.
9245 */
9246 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
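    /* The barrier ensures the sequence number is sampled before any of the individual
       DBGF/dtrace settings read below. */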
9247 ASMCompilerBarrier();
9248
9249 /*
9250 * We'll rebuild most of the middle block of data members (holding the
9251 * current settings) as we go along here, so start by clearing it all.
9252 */
9253 pDbgState->bmXcptExtra = 0;
9254 pDbgState->fCpe1Extra = 0;
9255 pDbgState->fCpe1Unwanted = 0;
9256 pDbgState->fCpe2Extra = 0;
9257 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
9258 pDbgState->bmExitsToCheck[i] = 0;
9259
9260 /*
9261 * Software interrupts (INT XXh) - no idea how to trigger these...
9262 */
9263 PVM pVM = pVCpu->CTX_SUFF(pVM);
9264 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
9265 || VBOXVMM_INT_SOFTWARE_ENABLED())
9266 {
9267 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
9268 }
9269
9270 /*
9271 * INT3 breakpoints - triggered by #BP exceptions.
9272 */
9273 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
9274 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
9275
9276 /*
9277 * Exception bitmap and XCPT events+probes.
9278 */
9279 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
9280 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
9281 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
9282
9283 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
9284 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
9285 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
9286 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
9287 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
9288 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
9289 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
9290 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
9291 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
9292 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
9293 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
9294 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
9295 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
9296 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
9297 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
9298 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
9299 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
9300 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
9301
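    /* Every extra exception trap requested above is delivered through the exception-or-NMI
       VM-exit, so flag that exit for checking. */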
9302 if (pDbgState->bmXcptExtra)
9303 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
9304
9305 /*
9306 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
9307 *
9308 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
9309 * So, when adding/changing/removing please don't forget to update it.
9310 *
9311 * Some of the macros are picking up local variables to save horizontal space,
9312 * (being able to see it in a table is the lesser evil here).
9313 */
9314#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
9315 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
9316 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
9317#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
9318 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
9319 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
9320 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
9321 } else do { } while (0)
9322#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
9323 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
9324 { \
9325 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
9326 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
9327 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
9328 } else do { } while (0)
9329#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
9330 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
9331 { \
9332 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
9333 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
9334 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
9335 } else do { } while (0)
9336#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
9337 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
9338 { \
9339 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
9340 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
9341 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
9342 } else do { } while (0)
9343
9344 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
9345 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
9346 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
9347 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
9348 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
9349
9350 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
9351 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
9352 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
9353 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
9354 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
9355 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
9356 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
9357 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
9358 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
9359 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
9360 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
9361 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
9362 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
9363 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
9364 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
9365 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
9366 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
9367 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
9368 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
9369 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
9370 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
9371 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
9372 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
9373 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
9374 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
9375 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
9376 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
9377 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
9378 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
9379 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
9380 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
9381 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
9382 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
9383 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
9384 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
9385 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
9386
9387 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
9388 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
9389 {
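        /* Import CR0, CR4 and the TPR up front so the guest context is not stale when the
           CR masks / TPR shadowing are adjusted below. */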
9390 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
9391 AssertRC(rc);
9392
9393#if 0 /** @todo fix me */
9394 pDbgState->fClearCr0Mask = true;
9395 pDbgState->fClearCr4Mask = true;
9396#endif
9397 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
9398 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
9399 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
9400 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
9401 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
9402 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
9403 require clearing here and in the loop if we start using it. */
9404 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
9405 }
9406 else
9407 {
9408 if (pDbgState->fClearCr0Mask)
9409 {
9410 pDbgState->fClearCr0Mask = false;
9411 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0);
9412 }
9413 if (pDbgState->fClearCr4Mask)
9414 {
9415 pDbgState->fClearCr4Mask = false;
9416 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR4);
9417 }
9418 }
9419 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
9420 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
9421
9422 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
9423 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
9424 {
9425 /** @todo later, need to fix handler as it assumes this won't usually happen. */
9426 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
9427 }
9428 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
9429 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
9430
9431 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
9432 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
9433 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
9434 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
9435 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
9436 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
9437 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
9438 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
9439#if 0 /** @todo too slow, fix handler. */
9440 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
9441#endif
9442 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
9443
9444 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
9445 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
9446 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
9447 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
9448 {
9449 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
9450 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
9451 }
9452 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
9453 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
9454 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
9455 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
9456
9457 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
9458 || IS_EITHER_ENABLED(pVM, INSTR_STR)
9459 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
9460 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
9461 {
9462 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
9463 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
9464 }
9465 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
9466 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
9467 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
9468 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
9469
9470 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
9471 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
9472 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
9473 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
9474 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
9475 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
9476 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
9477 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
9478 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
9479 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
9480 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
9481 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
9482 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
9483 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
9484 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
9485 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
9486 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
9487 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
9488 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
9489 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
9490 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
9491 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
9492
9493#undef IS_EITHER_ENABLED
9494#undef SET_ONLY_XBM_IF_EITHER_EN
9495#undef SET_CPE1_XBM_IF_EITHER_EN
9496#undef SET_CPEU_XBM_IF_EITHER_EN
9497#undef SET_CPE2_XBM_IF_EITHER_EN
9498
9499 /*
9500 * Sanitize the controls: mask them against what the CPU supports and set the secondary-controls bit if any secondary control is wanted.
9501 */
9502 pDbgState->fCpe2Extra &= pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1;
9503 if (pDbgState->fCpe2Extra)
9504 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
9505 pDbgState->fCpe1Extra &= pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1;
9506 pDbgState->fCpe1Unwanted &= ~pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed0;
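    /* If the desired RDTSC-exiting setting changed, remember the new preference and force
       the TSC offsetting / VMX-preemption timer to be re-evaluated before the next VM-entry. */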
9507 if (pVCpu->hm.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
9508 {
9509 pVCpu->hm.s.fDebugWantRdTscExit ^= true;
9510 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
9511 }
9512
9513 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
9514 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
9515 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
9516 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
9517}
9518
9519
9520/**
9521 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
9522 * appropriate.
9523 *
9524 * The caller has checked the VM-exit against the
9525 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs
9526 * already, so we don't have to do that either.
9527 *
9528 * @returns Strict VBox status code (i.e. informational status codes too).
9529 * @param pVCpu The cross context virtual CPU structure.
9530 * @param pVmxTransient Pointer to the VMX-transient structure.
9531 * @param uExitReason The VM-exit reason.
9532 *
9533 * @remarks The name of this function is displayed by dtrace, so keep it short
9534 * and to the point. No longer than 33 chars long, please.
9535 */
9536static VBOXSTRICTRC hmR0VmxHandleExitDtraceEvents(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
9537{
9538 /*
9539 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
9540 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
9541 *
9542 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
9543 * does. Must add/change/remove both places. Same ordering, please.
9544 *
9545 * Added/removed events must also be reflected in the next section
9546 * where we dispatch dtrace events.
9547 */
9548 bool fDtrace1 = false;
9549 bool fDtrace2 = false;
9550 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
9551 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
9552 uint32_t uEventArg = 0;
9553#define SET_EXIT(a_EventSubName) \
9554 do { \
9555 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
9556 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
9557 } while (0)
9558#define SET_BOTH(a_EventSubName) \
9559 do { \
9560 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
9561 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
9562 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
9563 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
9564 } while (0)
9565 switch (uExitReason)
9566 {
9567 case VMX_EXIT_MTF:
9568 return hmR0VmxExitMtf(pVCpu, pVmxTransient);
9569
9570 case VMX_EXIT_XCPT_OR_NMI:
9571 {
9572 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
9573 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
9574 {
9575 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9576 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9577 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9578 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
9579 {
9580 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
9581 {
9582 hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
9583 uEventArg = pVmxTransient->uExitIntErrorCode;
9584 }
9585 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
9586 switch (enmEvent1)
9587 {
9588 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
9589 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
9590 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
9591 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
9592 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
9593 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
9594 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
9595 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
9596 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
9597 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
9598 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
9599 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
9600 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
9601 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
9602 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
9603 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
9604 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
9605 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
9606 default: break;
9607 }
9608 }
9609 else
9610 AssertFailed();
9611 break;
9612
9613 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
9614 uEventArg = idxVector;
9615 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
9616 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
9617 break;
9618 }
9619 break;
9620 }
9621
9622 case VMX_EXIT_TRIPLE_FAULT:
9623 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
9624 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
9625 break;
9626 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
9627 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
9628 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
9629 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
9630 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
9631
9632 /* Instruction specific VM-exits: */
9633 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
9634 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
9635 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
9636 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
9637 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
9638 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
9639 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
9640 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
9641 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
9642 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
9643 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
9644 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
9645 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
9646 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
9647 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
9648 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
9649 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
9650 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
9651 case VMX_EXIT_MOV_CRX:
9652 hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
9653 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
9654 SET_BOTH(CRX_READ);
9655 else
9656 SET_BOTH(CRX_WRITE);
9657 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9658 break;
9659 case VMX_EXIT_MOV_DRX:
9660 hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
9661 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
9662 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
9663 SET_BOTH(DRX_READ);
9664 else
9665 SET_BOTH(DRX_WRITE);
9666 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
9667 break;
9668 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
9669 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
9670 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
9671 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
9672 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
9673 case VMX_EXIT_GDTR_IDTR_ACCESS:
9674 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
9675 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
9676 {
9677 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
9678 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
9679 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
9680 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
9681 }
9682 break;
9683
9684 case VMX_EXIT_LDTR_TR_ACCESS:
9685 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
9686 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
9687 {
9688 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
9689 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
9690 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
9691 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
9692 }
9693 break;
9694
9695 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
9696 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
9697 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
9698 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
9699 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
9700 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
9701 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
9702 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
9703 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
9704 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
9705 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
9706
9707 /* Events that aren't relevant at this point. */
9708 case VMX_EXIT_EXT_INT:
9709 case VMX_EXIT_INT_WINDOW:
9710 case VMX_EXIT_NMI_WINDOW:
9711 case VMX_EXIT_TPR_BELOW_THRESHOLD:
9712 case VMX_EXIT_PREEMPT_TIMER:
9713 case VMX_EXIT_IO_INSTR:
9714 break;
9715
9716 /* Errors and unexpected events. */
9717 case VMX_EXIT_INIT_SIGNAL:
9718 case VMX_EXIT_SIPI:
9719 case VMX_EXIT_IO_SMI:
9720 case VMX_EXIT_SMI:
9721 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
9722 case VMX_EXIT_ERR_MSR_LOAD:
9723 case VMX_EXIT_ERR_MACHINE_CHECK:
9724 break;
9725
9726 default:
9727 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
9728 break;
9729 }
9730#undef SET_BOTH
9731#undef SET_EXIT
9732
9733 /*
9734 * Dtrace tracepoints go first. We do them here at once so we don't
9735 * have to repeat the guest-state saving and related setup a few dozen times.
9736 * The downside is that we've got to repeat the switch, though this time
9737 * we use enmEvent since the probes are a subset of what DBGF does.
9738 */
9739 if (fDtrace1 || fDtrace2)
9740 {
9741 hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
9742 hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
9743 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9744 switch (enmEvent1)
9745 {
9746 /** @todo consider which extra parameters would be helpful for each probe. */
9747 case DBGFEVENT_END: break;
9748 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
9749 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
9750 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
9751 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
9752 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
9753 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
9754 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
9755 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
9756 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
9757 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
9758 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
9759 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
9760 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
9761 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
9762 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
9763 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
9764 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
9765 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
9766 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
9767 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
9768 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
9769 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
9770 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
9771 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
9772 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
9773 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
9774 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
9775 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
9776 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
9777 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
9778 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
9779 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
9780 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
9781 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
9782 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
9783 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
9784 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
9785 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
9786 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
9787 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
9788 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
9789 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
9790 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
9791 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
9792 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
9793 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
9794 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
9795 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
9796 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
9797 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
9798 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
9799 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
9800 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
9801 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
9802 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
9803 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
9804 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
9805 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
9806 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
9807 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
9808 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
9809 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
9810 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
9811 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
9812 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
9813 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
9814 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
9815 }
9816 switch (enmEvent2)
9817 {
9818 /** @todo consider which extra parameters would be helpful for each probe. */
9819 case DBGFEVENT_END: break;
9820 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
9821 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
9822 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
9823 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
9824 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
9825 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
9826 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
9827 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
9828 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
9829 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
9830 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
9831 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
9832 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
9833 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
9834 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
9835 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
9836 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
9837 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
9838 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
9839 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
9840 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
9841 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
9842 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
9843 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
9844 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
9845 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
9846 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
9847 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
9848 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
9849 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
9850 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
9851 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
9852 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
9853 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
9854 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
9855 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
9856 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
9857 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
9858 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
9859 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
9860 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
9861 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
9862 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
9863 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
9864 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
9865 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
9866 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
9867 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
9868 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
9869 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
9870 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
9871 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
9872 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
9873 }
9874 }
9875
9876 /*
9877 * Fire off the DBGF event, if enabled (our check here is just a quick one,
9878 * the DBGF call will do a full check).
9879 *
9880 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
9881 * Note! If we have two events, we prioritize the first, i.e. the instruction
9882 * one, in order to avoid event nesting.
9883 */
9884 PVM pVM = pVCpu->CTX_SUFF(pVM);
9885 if ( enmEvent1 != DBGFEVENT_END
9886 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
9887 {
9888 HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
9889 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
9890 if (rcStrict != VINF_SUCCESS)
9891 return rcStrict;
9892 }
9893 else if ( enmEvent2 != DBGFEVENT_END
9894 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
9895 {
9896 HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
9897 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
9898 if (rcStrict != VINF_SUCCESS)
9899 return rcStrict;
9900 }
9901
9902 return VINF_SUCCESS;
9903}
9904
9905
9906/**
9907 * Single-stepping VM-exit filtering.
9908 *
9909 * This is preprocessing the VM-exits and deciding whether we've gotten far
9910 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
9911 * handling is performed.
9912 *
9913 * @returns Strict VBox status code (i.e. informational status codes too).
9914 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9915 * @param pVmxTransient Pointer to the VMX-transient structure.
9916 * @param pDbgState The debug state.
9917 */
9918DECLINLINE(VBOXSTRICTRC) hmR0VmxRunDebugHandleExit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
9919{
9920 /*
9921 * Expensive (saves context) generic dtrace VM-exit probe.
9922 */
9923 uint32_t const uExitReason = pVmxTransient->uExitReason;
9924 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
9925 { /* more likely */ }
9926 else
9927 {
9928 hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
9929 int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
9930 AssertRC(rc);
9931 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
9932 }
9933
9934 /*
9935 * Check for host NMI, just to get that out of the way.
9936 */
9937 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
9938 { /* normally likely */ }
9939 else
9940 {
9941 int rc2 = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
9942 AssertRCReturn(rc2, rc2);
9943 uint32_t uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
9944 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
9945 return hmR0VmxExitXcptOrNmi(pVCpu, pVmxTransient);
9946 }
9947
9948 /*
9949 * Check for single stepping event if we're stepping.
9950 */
9951 if (pVCpu->hm.s.fSingleInstruction)
9952 {
9953 switch (uExitReason)
9954 {
9955 case VMX_EXIT_MTF:
9956 return hmR0VmxExitMtf(pVCpu, pVmxTransient);
9957
9958 /* Various events: */
9959 case VMX_EXIT_XCPT_OR_NMI:
9960 case VMX_EXIT_EXT_INT:
9961 case VMX_EXIT_TRIPLE_FAULT:
9962 case VMX_EXIT_INT_WINDOW:
9963 case VMX_EXIT_NMI_WINDOW:
9964 case VMX_EXIT_TASK_SWITCH:
9965 case VMX_EXIT_TPR_BELOW_THRESHOLD:
9966 case VMX_EXIT_APIC_ACCESS:
9967 case VMX_EXIT_EPT_VIOLATION:
9968 case VMX_EXIT_EPT_MISCONFIG:
9969 case VMX_EXIT_PREEMPT_TIMER:
9970
9971 /* Instruction specific VM-exits: */
9972 case VMX_EXIT_CPUID:
9973 case VMX_EXIT_GETSEC:
9974 case VMX_EXIT_HLT:
9975 case VMX_EXIT_INVD:
9976 case VMX_EXIT_INVLPG:
9977 case VMX_EXIT_RDPMC:
9978 case VMX_EXIT_RDTSC:
9979 case VMX_EXIT_RSM:
9980 case VMX_EXIT_VMCALL:
9981 case VMX_EXIT_VMCLEAR:
9982 case VMX_EXIT_VMLAUNCH:
9983 case VMX_EXIT_VMPTRLD:
9984 case VMX_EXIT_VMPTRST:
9985 case VMX_EXIT_VMREAD:
9986 case VMX_EXIT_VMRESUME:
9987 case VMX_EXIT_VMWRITE:
9988 case VMX_EXIT_VMXOFF:
9989 case VMX_EXIT_VMXON:
9990 case VMX_EXIT_MOV_CRX:
9991 case VMX_EXIT_MOV_DRX:
9992 case VMX_EXIT_IO_INSTR:
9993 case VMX_EXIT_RDMSR:
9994 case VMX_EXIT_WRMSR:
9995 case VMX_EXIT_MWAIT:
9996 case VMX_EXIT_MONITOR:
9997 case VMX_EXIT_PAUSE:
9998 case VMX_EXIT_GDTR_IDTR_ACCESS:
9999 case VMX_EXIT_LDTR_TR_ACCESS:
10000 case VMX_EXIT_INVEPT:
10001 case VMX_EXIT_RDTSCP:
10002 case VMX_EXIT_INVVPID:
10003 case VMX_EXIT_WBINVD:
10004 case VMX_EXIT_XSETBV:
10005 case VMX_EXIT_RDRAND:
10006 case VMX_EXIT_INVPCID:
10007 case VMX_EXIT_VMFUNC:
10008 case VMX_EXIT_RDSEED:
10009 case VMX_EXIT_XSAVES:
10010 case VMX_EXIT_XRSTORS:
10011 {
10012 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
10013 AssertRCReturn(rc, rc);
10014 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
10015 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
10016 return VINF_EM_DBG_STEPPED;
10017 break;
10018 }
10019
10020 /* Errors and unexpected events: */
10021 case VMX_EXIT_INIT_SIGNAL:
10022 case VMX_EXIT_SIPI:
10023 case VMX_EXIT_IO_SMI:
10024 case VMX_EXIT_SMI:
10025 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
10026 case VMX_EXIT_ERR_MSR_LOAD:
10027 case VMX_EXIT_ERR_MACHINE_CHECK:
10028 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault-like, so I guess we must process it? */
10029 break;
10030
10031 default:
10032 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
10033 break;
10034 }
10035 }
10036
10037 /*
10038 * Check for debugger event breakpoints and dtrace probes.
10039 */
10040 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
10041 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
10042 {
10043 VBOXSTRICTRC rcStrict = hmR0VmxHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
10044 if (rcStrict != VINF_SUCCESS)
10045 return rcStrict;
10046 }
10047
10048 /*
10049 * Normal processing.
10050 */
10051#ifdef HMVMX_USE_FUNCTION_TABLE
10052 return g_apfnVMExitHandlers[uExitReason](pVCpu, pVmxTransient);
10053#else
10054 return hmR0VmxHandleExit(pVCpu, pVmxTransient, uExitReason);
10055#endif
10056}
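/*
 * Illustrative sketch (comment only, not part of the build): the debugger/dtrace
 * check above treats pDbgState->bmExitsToCheck as an array of 32-bit words with
 * one bit per VM-exit reason.  A hand-rolled equivalent of the ASMBitTest() call,
 * written as a hypothetical helper purely for illustration, could look like this:
 *
 *     static bool hmR0VmxIsExitFlaggedSketch(uint32_t const *pauBitmap, uint32_t cWords, uint32_t uExitReason)
 *     {
 *         if (uExitReason >= cWords * 32U)
 *             return false;                                        // Out of range, nothing flagged.
 *         uint32_t const uWord = pauBitmap[uExitReason / 32U];     // Word holding this exit reason's bit.
 *         return RT_BOOL(uWord & RT_BIT_32(uExitReason % 32U));    // Test the bit within that word.
 *     }
 *
 * The real code only does the range check and defers the bit test to ASMBitTest().
 */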
10057
10058
10059/**
10060 * Single steps guest code using VT-x.
10061 *
10062 * @returns Strict VBox status code (i.e. informational status codes too).
10063 * @param pVCpu The cross context virtual CPU structure.
10064 *
10065 * @note Mostly the same as hmR0VmxRunGuestCodeNormal().
10066 */
10067static VBOXSTRICTRC hmR0VmxRunGuestCodeDebug(PVMCPU pVCpu)
10068{
10069 VMXTRANSIENT VmxTransient;
10070 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
10071
10072 /* Set HMCPU indicators. */
10073 bool const fSavedSingleInstruction = pVCpu->hm.s.fSingleInstruction;
10074 pVCpu->hm.s.fSingleInstruction = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
10075 pVCpu->hm.s.fDebugWantRdTscExit = false;
10076 pVCpu->hm.s.fUsingDebugLoop = true;
10077
10078 /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps. */
10079 VMXRUNDBGSTATE DbgState;
10080 hmR0VmxRunDebugStateInit(pVCpu, &DbgState);
10081 hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &DbgState, &VmxTransient);
10082
10083 /*
10084 * The loop.
10085 */
10086 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
10087 for (uint32_t cLoops = 0; ; cLoops++)
10088 {
10089 Assert(!HMR0SuspendPending());
10090 HMVMX_ASSERT_CPU_SAFE(pVCpu);
10091 bool fStepping = pVCpu->hm.s.fSingleInstruction;
10092
10093 /*
10094 * Preparatory work for running guest code; this may force us to return
10095 * to ring-3. This bugger disables interrupts on VINF_SUCCESS!
10096 */
10097 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
10098 hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState); /* Set up execution controls the next two calls can respond to. */
10099 rcStrict = hmR0VmxPreRunGuest(pVCpu, &VmxTransient, fStepping);
10100 if (rcStrict != VINF_SUCCESS)
10101 break;
10102
10103 hmR0VmxPreRunGuestCommitted(pVCpu, &VmxTransient);
10104 hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState); /* Override any obnoxious code in the above two calls. */
10105
10106 /*
10107 * Now we can run the guest code.
10108 */
10109 int rcRun = hmR0VmxRunGuest(pVCpu);
10110
10111 /*
10112 * Restore any residual host-state and save any bits shared between host
10113 * and guest into the guest-CPU state. Re-enables interrupts!
10114 */
10115 hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun);
10116
10117 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
10118 if (RT_SUCCESS(rcRun))
10119 { /* very likely */ }
10120 else
10121 {
10122 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
10123 hmR0VmxReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
10124 return rcRun;
10125 }
10126
10127 /* Profile the VM-exit. */
10128 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
10129 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
10130 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
10131 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
10132 HMVMX_START_EXIT_DISPATCH_PROF();
10133
10134 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason);
10135
10136 /*
10137 * Handle the VM-exit - we quit earlier on certain VM-exits, see hmR0VmxRunDebugHandleExit().
10138 */
10139 rcStrict = hmR0VmxRunDebugHandleExit(pVCpu, &VmxTransient, &DbgState);
10140 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
10141 if (rcStrict != VINF_SUCCESS)
10142 break;
10143 if (cLoops > pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops)
10144 {
10145 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
10146 rcStrict = VINF_EM_RAW_INTERRUPT;
10147 break;
10148 }
10149
10150 /*
10151 * Stepping: Did the RIP change? If so, consider it a single step.
10152 * Otherwise, make sure one of the TFs gets set.
10153 */
10154 if (fStepping)
10155 {
10156 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
10157 AssertRC(rc);
10158 if ( pVCpu->cpum.GstCtx.rip != DbgState.uRipStart
10159 || pVCpu->cpum.GstCtx.cs.Sel != DbgState.uCsStart)
10160 {
10161 rcStrict = VINF_EM_DBG_STEPPED;
10162 break;
10163 }
10164 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
10165 }
10166
10167 /*
10168 * Update when the dtrace settings change (DBGF kicks us, so no need to check).
10169 */
10170 if (VBOXVMM_GET_SETTINGS_SEQ_NO() != DbgState.uDtraceSettingsSeqNo)
10171 hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &DbgState, &VmxTransient);
10172 }
10173
10174 /*
10175 * Clear the X86_EFL_TF if necessary.
10176 */
10177 if (pVCpu->hm.s.fClearTrapFlag)
10178 {
10179 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS);
10180 AssertRC(rc);
10181 pVCpu->hm.s.fClearTrapFlag = false;
10182 pVCpu->cpum.GstCtx.eflags.Bits.u1TF = 0;
10183 }
10184 /** @todo There seem to be issues with the resume flag when the monitor trap
10185 * flag is pending without being used. Seen early in BIOS init when
10186 * accessing the APIC page in protected mode. */
10187
10188 /*
10189 * Restore VM-exit control settings as we may not reenter this function the
10190 * next time around.
10191 */
10192 rcStrict = hmR0VmxRunDebugStateRevert(pVCpu, &DbgState, rcStrict);
10193
10194 /* Restore HMCPU indicators. */
10195 pVCpu->hm.s.fUsingDebugLoop = false;
10196 pVCpu->hm.s.fDebugWantRdTscExit = false;
10197 pVCpu->hm.s.fSingleInstruction = fSavedSingleInstruction;
10198
10199 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
10200 return rcStrict;
10201}
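/*
 * Illustrative sketch (comment only): the stepping check in the loop above boils
 * down to comparing the current CS:RIP against the values recorded in the debug
 * state when the loop was entered.  A hypothetical predicate capturing just that
 * decision, assuming CS and RIP have already been imported, might read:
 *
 *     static bool hmR0VmxHasSteppedSketch(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState)
 *     {
 *         // True once the guest has left the instruction we started stepping from.
 *         return pVCpu->cpum.GstCtx.rip    != pDbgState->uRipStart
 *             || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart;
 *     }
 *
 * When this would return false, the loop instead flags HM_CHANGED_GUEST_DR7 so the
 * trap flag gets re-applied on the next VM-entry.
 */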
10202
10203
10204/** @} */
10205
10206
10207/**
10208 * Checks if any expensive dtrace probes are enabled and we should go to the
10209 * debug loop.
10210 *
10211 * @returns true if we should use debug loop, false if not.
10212 */
10213static bool hmR0VmxAnyExpensiveProbesEnabled(void)
10214{
10215 /* It's probably faster to OR the raw 32-bit counter variables together.
10216 Since the variables are in an array and the probes are next to one
10217 another (more or less), we have good locality. So, better to read
10218 eight or nine cache lines every time and only have one conditional than
10219 128+ conditionals, right? (See the illustrative sketch after this function.) */
10220 return ( VBOXVMM_R0_HMVMX_VMEXIT_ENABLED_RAW() /* expensive too due to context */
10221 | VBOXVMM_XCPT_DE_ENABLED_RAW()
10222 | VBOXVMM_XCPT_DB_ENABLED_RAW()
10223 | VBOXVMM_XCPT_BP_ENABLED_RAW()
10224 | VBOXVMM_XCPT_OF_ENABLED_RAW()
10225 | VBOXVMM_XCPT_BR_ENABLED_RAW()
10226 | VBOXVMM_XCPT_UD_ENABLED_RAW()
10227 | VBOXVMM_XCPT_NM_ENABLED_RAW()
10228 | VBOXVMM_XCPT_DF_ENABLED_RAW()
10229 | VBOXVMM_XCPT_TS_ENABLED_RAW()
10230 | VBOXVMM_XCPT_NP_ENABLED_RAW()
10231 | VBOXVMM_XCPT_SS_ENABLED_RAW()
10232 | VBOXVMM_XCPT_GP_ENABLED_RAW()
10233 | VBOXVMM_XCPT_PF_ENABLED_RAW()
10234 | VBOXVMM_XCPT_MF_ENABLED_RAW()
10235 | VBOXVMM_XCPT_AC_ENABLED_RAW()
10236 | VBOXVMM_XCPT_XF_ENABLED_RAW()
10237 | VBOXVMM_XCPT_VE_ENABLED_RAW()
10238 | VBOXVMM_XCPT_SX_ENABLED_RAW()
10239 | VBOXVMM_INT_SOFTWARE_ENABLED_RAW()
10240 | VBOXVMM_INT_HARDWARE_ENABLED_RAW()
10241 ) != 0
10242 || ( VBOXVMM_INSTR_HALT_ENABLED_RAW()
10243 | VBOXVMM_INSTR_MWAIT_ENABLED_RAW()
10244 | VBOXVMM_INSTR_MONITOR_ENABLED_RAW()
10245 | VBOXVMM_INSTR_CPUID_ENABLED_RAW()
10246 | VBOXVMM_INSTR_INVD_ENABLED_RAW()
10247 | VBOXVMM_INSTR_WBINVD_ENABLED_RAW()
10248 | VBOXVMM_INSTR_INVLPG_ENABLED_RAW()
10249 | VBOXVMM_INSTR_RDTSC_ENABLED_RAW()
10250 | VBOXVMM_INSTR_RDTSCP_ENABLED_RAW()
10251 | VBOXVMM_INSTR_RDPMC_ENABLED_RAW()
10252 | VBOXVMM_INSTR_RDMSR_ENABLED_RAW()
10253 | VBOXVMM_INSTR_WRMSR_ENABLED_RAW()
10254 | VBOXVMM_INSTR_CRX_READ_ENABLED_RAW()
10255 | VBOXVMM_INSTR_CRX_WRITE_ENABLED_RAW()
10256 | VBOXVMM_INSTR_DRX_READ_ENABLED_RAW()
10257 | VBOXVMM_INSTR_DRX_WRITE_ENABLED_RAW()
10258 | VBOXVMM_INSTR_PAUSE_ENABLED_RAW()
10259 | VBOXVMM_INSTR_XSETBV_ENABLED_RAW()
10260 | VBOXVMM_INSTR_SIDT_ENABLED_RAW()
10261 | VBOXVMM_INSTR_LIDT_ENABLED_RAW()
10262 | VBOXVMM_INSTR_SGDT_ENABLED_RAW()
10263 | VBOXVMM_INSTR_LGDT_ENABLED_RAW()
10264 | VBOXVMM_INSTR_SLDT_ENABLED_RAW()
10265 | VBOXVMM_INSTR_LLDT_ENABLED_RAW()
10266 | VBOXVMM_INSTR_STR_ENABLED_RAW()
10267 | VBOXVMM_INSTR_LTR_ENABLED_RAW()
10268 | VBOXVMM_INSTR_GETSEC_ENABLED_RAW()
10269 | VBOXVMM_INSTR_RSM_ENABLED_RAW()
10270 | VBOXVMM_INSTR_RDRAND_ENABLED_RAW()
10271 | VBOXVMM_INSTR_RDSEED_ENABLED_RAW()
10272 | VBOXVMM_INSTR_XSAVES_ENABLED_RAW()
10273 | VBOXVMM_INSTR_XRSTORS_ENABLED_RAW()
10274 | VBOXVMM_INSTR_VMM_CALL_ENABLED_RAW()
10275 | VBOXVMM_INSTR_VMX_VMCLEAR_ENABLED_RAW()
10276 | VBOXVMM_INSTR_VMX_VMLAUNCH_ENABLED_RAW()
10277 | VBOXVMM_INSTR_VMX_VMPTRLD_ENABLED_RAW()
10278 | VBOXVMM_INSTR_VMX_VMPTRST_ENABLED_RAW()
10279 | VBOXVMM_INSTR_VMX_VMREAD_ENABLED_RAW()
10280 | VBOXVMM_INSTR_VMX_VMRESUME_ENABLED_RAW()
10281 | VBOXVMM_INSTR_VMX_VMWRITE_ENABLED_RAW()
10282 | VBOXVMM_INSTR_VMX_VMXOFF_ENABLED_RAW()
10283 | VBOXVMM_INSTR_VMX_VMXON_ENABLED_RAW()
10284 | VBOXVMM_INSTR_VMX_VMFUNC_ENABLED_RAW()
10285 | VBOXVMM_INSTR_VMX_INVEPT_ENABLED_RAW()
10286 | VBOXVMM_INSTR_VMX_INVVPID_ENABLED_RAW()
10287 | VBOXVMM_INSTR_VMX_INVPCID_ENABLED_RAW()
10288 ) != 0
10289 || ( VBOXVMM_EXIT_TASK_SWITCH_ENABLED_RAW()
10290 | VBOXVMM_EXIT_HALT_ENABLED_RAW()
10291 | VBOXVMM_EXIT_MWAIT_ENABLED_RAW()
10292 | VBOXVMM_EXIT_MONITOR_ENABLED_RAW()
10293 | VBOXVMM_EXIT_CPUID_ENABLED_RAW()
10294 | VBOXVMM_EXIT_INVD_ENABLED_RAW()
10295 | VBOXVMM_EXIT_WBINVD_ENABLED_RAW()
10296 | VBOXVMM_EXIT_INVLPG_ENABLED_RAW()
10297 | VBOXVMM_EXIT_RDTSC_ENABLED_RAW()
10298 | VBOXVMM_EXIT_RDTSCP_ENABLED_RAW()
10299 | VBOXVMM_EXIT_RDPMC_ENABLED_RAW()
10300 | VBOXVMM_EXIT_RDMSR_ENABLED_RAW()
10301 | VBOXVMM_EXIT_WRMSR_ENABLED_RAW()
10302 | VBOXVMM_EXIT_CRX_READ_ENABLED_RAW()
10303 | VBOXVMM_EXIT_CRX_WRITE_ENABLED_RAW()
10304 | VBOXVMM_EXIT_DRX_READ_ENABLED_RAW()
10305 | VBOXVMM_EXIT_DRX_WRITE_ENABLED_RAW()
10306 | VBOXVMM_EXIT_PAUSE_ENABLED_RAW()
10307 | VBOXVMM_EXIT_XSETBV_ENABLED_RAW()
10308 | VBOXVMM_EXIT_SIDT_ENABLED_RAW()
10309 | VBOXVMM_EXIT_LIDT_ENABLED_RAW()
10310 | VBOXVMM_EXIT_SGDT_ENABLED_RAW()
10311 | VBOXVMM_EXIT_LGDT_ENABLED_RAW()
10312 | VBOXVMM_EXIT_SLDT_ENABLED_RAW()
10313 | VBOXVMM_EXIT_LLDT_ENABLED_RAW()
10314 | VBOXVMM_EXIT_STR_ENABLED_RAW()
10315 | VBOXVMM_EXIT_LTR_ENABLED_RAW()
10316 | VBOXVMM_EXIT_GETSEC_ENABLED_RAW()
10317 | VBOXVMM_EXIT_RSM_ENABLED_RAW()
10318 | VBOXVMM_EXIT_RDRAND_ENABLED_RAW()
10319 | VBOXVMM_EXIT_RDSEED_ENABLED_RAW()
10320 | VBOXVMM_EXIT_XSAVES_ENABLED_RAW()
10321 | VBOXVMM_EXIT_XRSTORS_ENABLED_RAW()
10322 | VBOXVMM_EXIT_VMM_CALL_ENABLED_RAW()
10323 | VBOXVMM_EXIT_VMX_VMCLEAR_ENABLED_RAW()
10324 | VBOXVMM_EXIT_VMX_VMLAUNCH_ENABLED_RAW()
10325 | VBOXVMM_EXIT_VMX_VMPTRLD_ENABLED_RAW()
10326 | VBOXVMM_EXIT_VMX_VMPTRST_ENABLED_RAW()
10327 | VBOXVMM_EXIT_VMX_VMREAD_ENABLED_RAW()
10328 | VBOXVMM_EXIT_VMX_VMRESUME_ENABLED_RAW()
10329 | VBOXVMM_EXIT_VMX_VMWRITE_ENABLED_RAW()
10330 | VBOXVMM_EXIT_VMX_VMXOFF_ENABLED_RAW()
10331 | VBOXVMM_EXIT_VMX_VMXON_ENABLED_RAW()
10332 | VBOXVMM_EXIT_VMX_VMFUNC_ENABLED_RAW()
10333 | VBOXVMM_EXIT_VMX_INVEPT_ENABLED_RAW()
10334 | VBOXVMM_EXIT_VMX_INVVPID_ENABLED_RAW()
10335 | VBOXVMM_EXIT_VMX_INVPCID_ENABLED_RAW()
10336 | VBOXVMM_EXIT_VMX_EPT_VIOLATION_ENABLED_RAW()
10337 | VBOXVMM_EXIT_VMX_EPT_MISCONFIG_ENABLED_RAW()
10338 | VBOXVMM_EXIT_VMX_VAPIC_ACCESS_ENABLED_RAW()
10339 | VBOXVMM_EXIT_VMX_VAPIC_WRITE_ENABLED_RAW()
10340 ) != 0;
10341}
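/*
 * Illustrative sketch (comment only): the function above deliberately ORs the raw
 * probe-enable values and tests the result once instead of short-circuiting each
 * probe individually.  With hypothetical flag variables the two shapes compare as:
 *
 *     // Short-circuit form: potentially one conditional branch per probe.
 *     if (fProbeA || fProbeB || fProbeC)
 *         return true;
 *     return false;
 *
 *     // Branch-light form used above: one test over the OR of the raw values.
 *     return (fProbeARaw | fProbeBRaw | fProbeCRaw) != 0;
 *
 * fProbeA/fProbeARaw and friends are placeholders; the real code uses the
 * generated VBOXVMM_*_ENABLED_RAW() accessors.
 */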
10342
10343
10344/**
10345 * Runs the guest code using VT-x.
10346 *
10347 * @returns Strict VBox status code (i.e. informational status codes too).
10348 * @param pVCpu The cross context virtual CPU structure.
10349 */
10350VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVMCPU pVCpu)
10351{
10352 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10353 Assert(VMMRZCallRing3IsEnabled(pVCpu));
10354 Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
10355 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
10356
10357 VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pCtx);
10358
10359 VBOXSTRICTRC rcStrict;
10360#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10361 bool const fInNestedGuestMode = CPUMIsGuestInVmxNonRootMode(pCtx);
10362#else
10363 bool const fInNestedGuestMode = false;
10364#endif
10365 if (!fInNestedGuestMode)
10366 {
10367 if ( !pVCpu->hm.s.fUseDebugLoop
10368 && (!VBOXVMM_ANY_PROBES_ENABLED() || !hmR0VmxAnyExpensiveProbesEnabled())
10369 && !DBGFIsStepping(pVCpu)
10370 && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
10371 rcStrict = hmR0VmxRunGuestCodeNormal(pVCpu);
10372 else
10373 rcStrict = hmR0VmxRunGuestCodeDebug(pVCpu);
10374 }
10375#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10376 else
10377 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
10378
10379 if (rcStrict == VINF_VMX_VMLAUNCH_VMRESUME)
10380 rcStrict = hmR0VmxRunGuestCodeNested(pVCpu);
10381#endif
10382
10383 if (rcStrict == VERR_EM_INTERPRETER)
10384 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
10385 else if (rcStrict == VINF_EM_RESET)
10386 rcStrict = VINF_EM_TRIPLE_FAULT;
10387
10388 int rc2 = hmR0VmxExitToRing3(pVCpu, rcStrict);
10389 if (RT_FAILURE(rc2))
10390 {
10391 pVCpu->hm.s.u32HMError = (uint32_t)VBOXSTRICTRC_VAL(rcStrict);
10392 rcStrict = rc2;
10393 }
10394 Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
10395 Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
10396 return rcStrict;
10397}
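/*
 * Illustrative sketch (comment only): the normal/debug loop selection above can be
 * read as the negation of a hypothetical "wants debug loop" predicate:
 *
 *     static bool hmR0VmxWantsDebugLoopSketch(PVMCPU pVCpu)
 *     {
 *         return pVCpu->hm.s.fUseDebugLoop                                            // Forced by HM.
 *             || (VBOXVMM_ANY_PROBES_ENABLED() && hmR0VmxAnyExpensiveProbesEnabled()) // Expensive dtrace probes.
 *             || DBGFIsStepping(pVCpu)                                                // Guest single stepping.
 *             || pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints != 0;          // Int3 breakpoints armed.
 *     }
 *
 * Only when this would return false do we take the cheaper hmR0VmxRunGuestCodeNormal() path.
 */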
10398
10399
10400#ifndef HMVMX_USE_FUNCTION_TABLE
10401DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
10402{
10403#ifdef DEBUG_ramshankar
10404#define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
10405 do { \
10406 if (a_fSave != 0) \
10407 hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); \
10408 VBOXSTRICTRC rcStrict = a_CallExpr; \
10409 if (a_fSave != 0) \
10410 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); \
10411 return rcStrict; \
10412 } while (0)
10413#else
10414# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
10415#endif
10416 switch (rcReason)
10417 {
10418 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, hmR0VmxExitEptMisconfig(pVCpu, pVmxTransient));
10419 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, hmR0VmxExitEptViolation(pVCpu, pVmxTransient));
10420 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, hmR0VmxExitIoInstr(pVCpu, pVmxTransient));
10421 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, hmR0VmxExitCpuid(pVCpu, pVmxTransient));
10422 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, hmR0VmxExitRdtsc(pVCpu, pVmxTransient));
10423 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, hmR0VmxExitRdtscp(pVCpu, pVmxTransient));
10424 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitApicAccess(pVCpu, pVmxTransient));
10425 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, hmR0VmxExitXcptOrNmi(pVCpu, pVmxTransient));
10426 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, hmR0VmxExitMovCRx(pVCpu, pVmxTransient));
10427 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, hmR0VmxExitExtInt(pVCpu, pVmxTransient));
10428 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, hmR0VmxExitIntWindow(pVCpu, pVmxTransient));
10429 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, hmR0VmxExitTprBelowThreshold(pVCpu, pVmxTransient));
10430 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, hmR0VmxExitMwait(pVCpu, pVmxTransient));
10431 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, hmR0VmxExitMonitor(pVCpu, pVmxTransient));
10432 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, hmR0VmxExitTaskSwitch(pVCpu, pVmxTransient));
10433 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, hmR0VmxExitPreemptTimer(pVCpu, pVmxTransient));
10434 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, hmR0VmxExitRdmsr(pVCpu, pVmxTransient));
10435 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, hmR0VmxExitWrmsr(pVCpu, pVmxTransient));
10436 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, hmR0VmxExitVmcall(pVCpu, pVmxTransient));
10437 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, hmR0VmxExitMovDRx(pVCpu, pVmxTransient));
10438 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, hmR0VmxExitHlt(pVCpu, pVmxTransient));
10439 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, hmR0VmxExitInvd(pVCpu, pVmxTransient));
10440 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, hmR0VmxExitInvlpg(pVCpu, pVmxTransient));
10441 case VMX_EXIT_RSM: VMEXIT_CALL_RET(0, hmR0VmxExitRsm(pVCpu, pVmxTransient));
10442 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, hmR0VmxExitMtf(pVCpu, pVmxTransient));
10443 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, hmR0VmxExitPause(pVCpu, pVmxTransient));
10444 case VMX_EXIT_GDTR_IDTR_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitXdtrAccess(pVCpu, pVmxTransient));
10445 case VMX_EXIT_LDTR_TR_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitXdtrAccess(pVCpu, pVmxTransient));
10446 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, hmR0VmxExitWbinvd(pVCpu, pVmxTransient));
10447 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, hmR0VmxExitXsetbv(pVCpu, pVmxTransient));
10448 case VMX_EXIT_RDRAND: VMEXIT_CALL_RET(0, hmR0VmxExitRdrand(pVCpu, pVmxTransient));
10449 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, hmR0VmxExitInvpcid(pVCpu, pVmxTransient));
10450 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, hmR0VmxExitGetsec(pVCpu, pVmxTransient));
10451 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, hmR0VmxExitRdpmc(pVCpu, pVmxTransient));
10452#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10453 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, hmR0VmxExitVmclear(pVCpu, pVmxTransient));
10454 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, hmR0VmxExitVmlaunch(pVCpu, pVmxTransient));
10455 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, hmR0VmxExitVmptrld(pVCpu, pVmxTransient));
10456 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, hmR0VmxExitVmptrst(pVCpu, pVmxTransient));
10457 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, hmR0VmxExitVmread(pVCpu, pVmxTransient));
10458 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, hmR0VmxExitVmresume(pVCpu, pVmxTransient));
10459 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, hmR0VmxExitVmwrite(pVCpu, pVmxTransient));
10460 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, hmR0VmxExitVmxoff(pVCpu, pVmxTransient));
10461 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, hmR0VmxExitVmxon(pVCpu, pVmxTransient));
10462#else
10463 case VMX_EXIT_VMCLEAR:
10464 case VMX_EXIT_VMLAUNCH:
10465 case VMX_EXIT_VMPTRLD:
10466 case VMX_EXIT_VMPTRST:
10467 case VMX_EXIT_VMREAD:
10468 case VMX_EXIT_VMRESUME:
10469 case VMX_EXIT_VMWRITE:
10470 case VMX_EXIT_VMXOFF:
10471 case VMX_EXIT_VMXON:
10472 return hmR0VmxExitSetPendingXcptUD(pVCpu, pVmxTransient);
10473#endif
10474
10475 case VMX_EXIT_TRIPLE_FAULT: return hmR0VmxExitTripleFault(pVCpu, pVmxTransient);
10476 case VMX_EXIT_NMI_WINDOW: return hmR0VmxExitNmiWindow(pVCpu, pVmxTransient);
10477 case VMX_EXIT_INIT_SIGNAL: return hmR0VmxExitInitSignal(pVCpu, pVmxTransient);
10478 case VMX_EXIT_SIPI: return hmR0VmxExitSipi(pVCpu, pVmxTransient);
10479 case VMX_EXIT_IO_SMI: return hmR0VmxExitIoSmi(pVCpu, pVmxTransient);
10480 case VMX_EXIT_SMI: return hmR0VmxExitSmi(pVCpu, pVmxTransient);
10481 case VMX_EXIT_ERR_MSR_LOAD: return hmR0VmxExitErrMsrLoad(pVCpu, pVmxTransient);
10482 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return hmR0VmxExitErrInvalidGuestState(pVCpu, pVmxTransient);
10483 case VMX_EXIT_ERR_MACHINE_CHECK: return hmR0VmxExitErrMachineCheck(pVCpu, pVmxTransient);
10484
10485 case VMX_EXIT_INVEPT:
10486 case VMX_EXIT_INVVPID:
10487 case VMX_EXIT_VMFUNC:
10488 case VMX_EXIT_XSAVES:
10489 case VMX_EXIT_XRSTORS:
10490 return hmR0VmxExitSetPendingXcptUD(pVCpu, pVmxTransient);
10491
10492 case VMX_EXIT_ENCLS:
10493 case VMX_EXIT_RDSEED: /* only spurious VM-exits, so undefined */
10494 case VMX_EXIT_PML_FULL:
10495 default:
10496 return hmR0VmxExitErrUndefined(pVCpu, pVmxTransient);
10497 }
10498#undef VMEXIT_CALL_RET
10499}
10500#endif /* !HMVMX_USE_FUNCTION_TABLE */
10501
10502
10503#ifdef VBOX_STRICT
10504 /* Is there some generic IPRT define for this that is not in Runtime/internal/\*? */
10505# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
10506 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
10507
10508# define HMVMX_ASSERT_PREEMPT_CPUID() \
10509 do { \
10510 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
10511 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
10512 } while (0)
10513
10514# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
10515 do { \
10516 AssertPtr((a_pVCpu)); \
10517 AssertPtr((a_pVmxTransient)); \
10518 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
10519 Assert(ASMIntAreEnabled()); \
10520 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
10521 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
10522 Log4Func(("vcpu[%RU32] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v\n", (a_pVCpu)->idCpu)); \
10523 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
10524 if (VMMR0IsLogFlushDisabled((a_pVCpu))) \
10525 HMVMX_ASSERT_PREEMPT_CPUID(); \
10526 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
10527 } while (0)
10528
10529# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
10530 do { \
10531 Log4Func(("\n")); \
10532 } while (0)
10533#else
10534# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
10535 do { \
10536 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
10537 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
10538 } while (0)
10539# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
10540#endif
10541
10542
10543/**
10544 * Advances the guest RIP by the specified number of bytes.
10545 *
10546 * @param pVCpu The cross context virtual CPU structure.
10547 * @param cbInstr Number of bytes to advance the RIP by.
10548 *
10549 * @remarks No-long-jump zone!!!
10550 */
10551DECLINLINE(void) hmR0VmxAdvanceGuestRipBy(PVMCPU pVCpu, uint32_t cbInstr)
10552{
10553 /* Advance the RIP. */
10554 pVCpu->cpum.GstCtx.rip += cbInstr;
10555 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
10556
10557 /* Update interrupt inhibition. */
10558 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10559 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
10560 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
10561}
10562
10563
10564/**
10565 * Advances the guest RIP after reading it from the VMCS.
10566 *
10567 * @returns VBox status code, no informational status codes.
10568 * @param pVCpu The cross context virtual CPU structure.
10569 * @param pVmxTransient Pointer to the VMX transient structure.
10570 *
10571 * @remarks No-long-jump zone!!!
10572 */
10573static int hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
10574{
10575 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
10576 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
10577 AssertRCReturn(rc, rc);
10578
10579 hmR0VmxAdvanceGuestRipBy(pVCpu, pVmxTransient->cbInstr);
10580 return VINF_SUCCESS;
10581}
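/*
 * Illustrative sketch (comment only): a typical instruction VM-exit handler that
 * does its instruction-specific work elsewhere and only needs to skip the
 * instruction would use the helper above roughly like this (hypothetical handler,
 * not part of this file):
 *
 *     HMVMX_EXIT_DECL hmR0VmxExitSomeInstrSketch(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
 *     {
 *         HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
 *         // ... perform the instruction-specific work here ...
 *         int rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient); // Reads instr. length, imports RIP/RFLAGS, advances RIP.
 *         AssertRCReturn(rc, rc);
 *         return VINF_SUCCESS;
 *     }
 */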
10582
10583
10584/**
10585 * Tries to determine what part of the guest-state VT-x has deemed invalid
10586 * and updates the error record fields accordingly.
10587 *
10588 * @return VMX_IGS_* return codes.
10589 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
10590 * wrong with the guest state.
10591 *
10592 * @param pVCpu The cross context virtual CPU structure.
10593 *
10594 * @remarks This function assumes our cache of the VMCS controls
10595 * is valid, i.e. hmR0VmxCheckVmcsCtls() succeeded.
10596 */
10597static uint32_t hmR0VmxCheckGuestState(PVMCPU pVCpu)
10598{
10599#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
10600#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { \
10601 uError = (err); \
10602 break; \
10603 } else do { } while (0)
10604
10605 int rc;
10606 PVM pVM = pVCpu->CTX_SUFF(pVM);
10607 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10608 uint32_t uError = VMX_IGS_ERROR;
10609 uint32_t u32Val;
10610 bool const fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuest;
10611
10612 do
10613 {
10614 /*
10615 * CR0.
10616 */
10617 uint32_t fSetCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
10618 uint32_t const fZapCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
10619 /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG).
10620 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
10621 if (fUnrestrictedGuest)
10622 fSetCr0 &= ~(X86_CR0_PE | X86_CR0_PG);
10623
10624 uint32_t u32GuestCr0;
10625 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32GuestCr0);
10626 AssertRCBreak(rc);
10627 HMVMX_CHECK_BREAK((u32GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
10628 HMVMX_CHECK_BREAK(!(u32GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
10629 if ( !fUnrestrictedGuest
10630 && (u32GuestCr0 & X86_CR0_PG)
10631 && !(u32GuestCr0 & X86_CR0_PE))
10632 {
10633 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
10634 }
10635
10636 /*
10637 * CR4.
10638 */
10639 uint64_t const fSetCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
10640 uint64_t const fZapCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
10641
10642 uint32_t u32GuestCr4;
10643 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32GuestCr4);
10644 AssertRCBreak(rc);
10645 HMVMX_CHECK_BREAK((u32GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
10646 HMVMX_CHECK_BREAK(!(u32GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
10647
10648 /*
10649 * IA32_DEBUGCTL MSR.
10650 */
10651 uint64_t u64Val;
10652 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
10653 AssertRCBreak(rc);
10654 if ( (pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
10655 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
10656 {
10657 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
10658 }
10659 uint64_t u64DebugCtlMsr = u64Val;
10660
10661#ifdef VBOX_STRICT
10662 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
10663 AssertRCBreak(rc);
10664 Assert(u32Val == pVCpu->hm.s.vmx.Ctls.u32EntryCtls);
10665#endif
10666 bool const fLongModeGuest = RT_BOOL(pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
10667
10668 /*
10669 * RIP and RFLAGS.
10670 */
10671 uint32_t u32Eflags;
10672#if HC_ARCH_BITS == 64
10673 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64Val);
10674 AssertRCBreak(rc);
10675 /* pCtx->rip can be different from the one in the VMCS (e.g. after running guest code and VM-exits that don't update it). */
10676 if ( !fLongModeGuest
10677 || !pCtx->cs.Attr.n.u1Long)
10678 {
10679 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
10680 }
10681 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
10682 * must be identical if the "IA-32e mode guest" VM-entry
10683 * control is 1 and CS.L is 1. No check applies if the
10684 * CPU supports 64 linear-address bits. */
10685
10686 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
10687 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RFLAGS, &u64Val);
10688 AssertRCBreak(rc);
10689 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
10690 VMX_IGS_RFLAGS_RESERVED);
10691 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
10692 u32Eflags = u64Val;
10693#else
10694 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Eflags);
10695 AssertRCBreak(rc);
10696 HMVMX_CHECK_BREAK(!(u32Eflags & 0xffc08028), VMX_IGS_RFLAGS_RESERVED); /* Bit 31:22, Bit 15, 5, 3 MBZ. */
10697 HMVMX_CHECK_BREAK((u32Eflags & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
10698#endif
10699
10700 if ( fLongModeGuest
10701 || ( fUnrestrictedGuest
10702 && !(u32GuestCr0 & X86_CR0_PE)))
10703 {
10704 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
10705 }
10706
10707 uint32_t u32EntryInfo;
10708 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
10709 AssertRCBreak(rc);
10710 if ( VMX_ENTRY_INT_INFO_IS_VALID(u32EntryInfo)
10711 && VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_EXT_INT)
10712 {
10713 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
10714 }
10715
10716 /*
10717 * 64-bit checks.
10718 */
10719#if HC_ARCH_BITS == 64
10720 if (fLongModeGuest)
10721 {
10722 HMVMX_CHECK_BREAK(u32GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
10723 HMVMX_CHECK_BREAK(u32GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
10724 }
10725
10726 if ( !fLongModeGuest
10727 && (u32GuestCr4 & X86_CR4_PCIDE))
10728 {
10729 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
10730 }
10731
10732 /** @todo CR3 field must be such that bits 63:52 and bits in the range
10733 * 51:32 beyond the processor's physical-address width are 0. */
10734
10735 if ( (pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
10736 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
10737 {
10738 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
10739 }
10740
10741 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
10742 AssertRCBreak(rc);
10743 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
10744
10745 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
10746 AssertRCBreak(rc);
10747 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
10748#endif
10749
10750 /*
10751 * PERF_GLOBAL MSR.
10752 */
10753 if (pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
10754 {
10755 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
10756 AssertRCBreak(rc);
10757 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
10758 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
10759 }
10760
10761 /*
10762 * PAT MSR.
10763 */
10764 if (pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
10765 {
10766 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
10767 AssertRCBreak(rc);
10768 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
10769 for (unsigned i = 0; i < 8; i++)
10770 {
10771 uint8_t u8Val = (u64Val & 0xff);
10772 if ( u8Val != 0 /* UC */
10773 && u8Val != 1 /* WC */
10774 && u8Val != 4 /* WT */
10775 && u8Val != 5 /* WP */
10776 && u8Val != 6 /* WB */
10777 && u8Val != 7 /* UC- */)
10778 {
10779 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
10780 }
10781 u64Val >>= 8;
10782 }
10783 }
10784
10785 /*
10786 * EFER MSR.
10787 */
10788 if (pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
10789 {
10790 Assert(pVM->hm.s.vmx.fSupportsVmcsEfer);
10791 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
10792 AssertRCBreak(rc);
10793 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
10794 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
10795 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVCpu->hm.s.vmx.Ctls.u32EntryCtls
10796 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
10797 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
10798 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
10799 * iemVmxVmentryCheckGuestState(). */
10800 HMVMX_CHECK_BREAK( fUnrestrictedGuest
10801 || !(u32GuestCr0 & X86_CR0_PG)
10802 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
10803 VMX_IGS_EFER_LMA_LME_MISMATCH);
10804 }
10805
10806 /*
10807 * Segment registers.
10808 */
10809 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
10810 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
10811 if (!(u32Eflags & X86_EFL_VM))
10812 {
10813 /* CS */
10814 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
10815 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
10816 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
10817 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
10818 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
10819 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
10820 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
10821 /* CS cannot be loaded with NULL in protected mode. */
10822 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
10823 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
10824 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
10825 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
10826 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
10827 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
10828 else if (pVM->hm.s.vmx.fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
10829 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
10830 else
10831 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
10832
10833 /* SS */
10834 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
10835 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
10836 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
10837 if ( !(pCtx->cr0 & X86_CR0_PE)
10838 || pCtx->cs.Attr.n.u4Type == 3)
10839 {
10840 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
10841 }
10842 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
10843 {
10844 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
10845 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
10846 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
10847 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
10848 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
10849 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
10850 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
10851 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
10852 }
10853
10854 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxExportGuestSegmentReg(). */
10855 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
10856 {
10857 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
10858 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
10859 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
10860 || pCtx->ds.Attr.n.u4Type > 11
10861 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
10862 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
10863 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
10864 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
10865 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
10866 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
10867 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
10868 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
10869 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
10870 }
10871 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
10872 {
10873 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
10874 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
10875 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
10876 || pCtx->es.Attr.n.u4Type > 11
10877 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
10878 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
10879 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
10880 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
10881 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
10882 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
10883 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
10884 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
10885 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
10886 }
10887 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
10888 {
10889 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
10890 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
10891 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
10892 || pCtx->fs.Attr.n.u4Type > 11
10893 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
10894 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
10895 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
10896 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
10897 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
10898 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
10899 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
10900 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
10901 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
10902 }
10903 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
10904 {
10905 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
10906 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
10907 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
10908 || pCtx->gs.Attr.n.u4Type > 11
10909 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
10910 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
10911 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
10912 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
10913 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
10914 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
10915 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
10916 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
10917 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
10918 }
10919 /* 64-bit capable CPUs. */
10920#if HC_ARCH_BITS == 64
10921 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
10922 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
10923 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
10924 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
10925 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
10926 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
10927 VMX_IGS_LONGMODE_SS_BASE_INVALID);
10928 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
10929 VMX_IGS_LONGMODE_DS_BASE_INVALID);
10930 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
10931 VMX_IGS_LONGMODE_ES_BASE_INVALID);
10932#endif
10933 }
10934 else
10935 {
10936 /* V86 mode checks. */
10937 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
10938 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
10939 {
10940 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
10941 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
10942 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
10943 }
10944 else
10945 {
10946 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
10947 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
10948 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
10949 }
10950
10951 /* CS */
10952 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
10953 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
10954 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
10955 /* SS */
10956 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
10957 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
10958 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
10959 /* DS */
10960 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
10961 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
10962 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
10963 /* ES */
10964 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
10965 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
10966 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
10967 /* FS */
10968 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
10969 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
10970 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
10971 /* GS */
10972 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
10973 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
10974 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
10975 /* 64-bit capable CPUs. */
10976#if HC_ARCH_BITS == 64
10977 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
10978 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
10979 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
10980 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
10981 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
10982 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
10983 VMX_IGS_LONGMODE_SS_BASE_INVALID);
10984 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
10985 VMX_IGS_LONGMODE_DS_BASE_INVALID);
10986 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
10987 VMX_IGS_LONGMODE_ES_BASE_INVALID);
10988#endif
10989 }
10990
10991 /*
10992 * TR.
10993 */
10994 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
10995 /* 64-bit capable CPUs. */
10996#if HC_ARCH_BITS == 64
10997 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
10998#endif
10999 if (fLongModeGuest)
11000 {
11001 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
11002 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
11003 }
11004 else
11005 {
11006 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
11007 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
11008 VMX_IGS_TR_ATTR_TYPE_INVALID);
11009 }
11010 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
11011 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
11012 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
11013 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
11014 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
11015 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
11016 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
11017 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
11018
11019 /*
11020 * GDTR and IDTR.
11021 */
11022#if HC_ARCH_BITS == 64
11023 rc = VMXReadVmcs64(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
11024 AssertRCBreak(rc);
11025 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
11026
11027 rc = VMXReadVmcs64(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
11028 AssertRCBreak(rc);
11029 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
11030#endif
11031
11032 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
11033 AssertRCBreak(rc);
11034 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
11035
11036 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
11037 AssertRCBreak(rc);
11038 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
11039
11040 /*
11041 * Guest Non-Register State.
11042 */
11043 /* Activity State. */
11044 uint32_t u32ActivityState;
11045 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
11046 AssertRCBreak(rc);
11047 HMVMX_CHECK_BREAK( !u32ActivityState
11048 || (u32ActivityState & RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
11049 VMX_IGS_ACTIVITY_STATE_INVALID);
11050 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
11051 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
11052 uint32_t u32IntrState;
11053 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
11054 AssertRCBreak(rc);
11055 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
11056 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
11057 {
11058 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
11059 }
11060
11061 /** @todo Activity state and injecting interrupts. Left as a todo since we
11062 * currently don't use any activity state other than ACTIVE. */
11063
11064 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
11065 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
11066
11067 /* Guest interruptibility-state. */
11068 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
11069 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
11070 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
11071 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
11072 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
11073 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
11074 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
11075 if (VMX_ENTRY_INT_INFO_IS_VALID(u32EntryInfo))
11076 {
11077 if (VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_EXT_INT)
11078 {
11079 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
11080 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
11081 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
11082 }
11083 else if (VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_NMI)
11084 {
11085 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
11086 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
11087 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
11088 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
11089 }
11090 }
11091 /** @todo Assumes the processor is not in SMM. */
11092 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
11093 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
11094 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
11095 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
11096 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
11097 if ( (pVCpu->hm.s.vmx.Ctls.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
11098 && VMX_ENTRY_INT_INFO_IS_VALID(u32EntryInfo)
11099 && VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_NMI)
11100 {
11101 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI),
11102 VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
11103 }
11104
11105 /* Pending debug exceptions. */
11106#if HC_ARCH_BITS == 64
11107 rc = VMXReadVmcs64(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
11108 AssertRCBreak(rc);
11109 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
11110 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
11111 u32Val = u64Val; /* For pending debug exceptions checks below. */
11112#else
11113 rc = VMXReadVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u32Val);
11114 AssertRCBreak(rc);
11115 /* Bits 31:15, Bit 13, Bits 11:4 MBZ. */
11116 HMVMX_CHECK_BREAK(!(u32Val & 0xffffaff0), VMX_IGS_PENDING_DEBUG_RESERVED);
11117#endif
11118
11119 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
11120 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
11121 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
11122 {
11123 if ( (u32Eflags & X86_EFL_TF)
11124 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
11125 {
11126 /* Bit 14 is PendingDebug.BS. */
11127 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
11128 }
11129 if ( !(u32Eflags & X86_EFL_TF)
11130 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
11131 {
11132 /* Bit 14 is PendingDebug.BS. */
11133 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
11134 }
11135 }
11136
11137 /* VMCS link pointer. */
11138 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
11139 AssertRCBreak(rc);
11140 if (u64Val != UINT64_C(0xffffffffffffffff))
11141 {
11142 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
11143 /** @todo Bits beyond the processor's physical-address width MBZ. */
11144 /** @todo The 32-bit value located in memory referenced by the value of this field
11145 * (as a physical address) must contain the processor's VMCS revision ID. */
11146 /** @todo SMM checks. */
11147 }
11148
11149 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
11150 * not using Nested Paging? */
11151 if ( pVM->hm.s.fNestedPaging
11152 && !fLongModeGuest
11153 && CPUMIsGuestInPAEModeEx(pCtx))
11154 {
11155 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
11156 AssertRCBreak(rc);
11157 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
11158
11159 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
11160 AssertRCBreak(rc);
11161 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
11162
11163 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
11164 AssertRCBreak(rc);
11165 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
11166
11167 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
11168 AssertRCBreak(rc);
11169 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
11170 }
11171
11172 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
11173 if (uError == VMX_IGS_ERROR)
11174 uError = VMX_IGS_REASON_NOT_FOUND;
11175 } while (0);
11176
11177 pVCpu->hm.s.u32HMError = uError;
11178 return uError;
11179
11180#undef HMVMX_ERROR_BREAK
11181#undef HMVMX_CHECK_BREAK
11182}
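/*
 * Illustrative sketch (comment only): the CR0/CR4 fixed-bit checks at the top of
 * the function above follow the usual VT-x pattern derived from the
 * IA32_VMX_CR0_FIXED0/FIXED1 MSRs (and their CR4 equivalents):
 *
 *     // Bits set in both MSRs must be 1 in the guest value; bits clear in both must be 0.
 *     uint64_t const fSet   = uFixed0 & uFixed1;       // must-be-one mask
 *     uint64_t const fZap   = uFixed0 | uFixed1;       // may-be-one mask (anything outside is must-be-zero)
 *     bool const     fValid = (uGuestCr & fSet) == fSet
 *                          && (uGuestCr & ~fZap) == 0;
 *
 * uFixed0/uFixed1/uGuestCr are placeholders for the MSR values cached in
 * pVM->hm.s.vmx.Msrs and the value read from the VMCS; the unrestricted-guest
 * case additionally relaxes CR0.PE/CR0.PG as done above.
 */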
11183
11184
11185/** @name VM-exit handlers.
11186 * @{
11187 */
11188/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
11189/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
11190/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
11191
11192/**
11193 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
11194 */
11195HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11196{
11197 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11198 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
11199 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
11200 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
11201 return VINF_SUCCESS;
11202 return VINF_EM_RAW_INTERRUPT;
11203}
11204
11205
11206/**
11207 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
11208 */
11209HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmi(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11210{
11211 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11212 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
11213
11214 int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11215 AssertRCReturn(rc, rc);
11216
11217 uint32_t uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11218 Assert( !(pVCpu->hm.s.vmx.Ctls.u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
11219 && uIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
11220 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
11221
11222 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11223 {
11224 /*
11225 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
11226 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
11227 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
11228 *
11229 * [1] -- See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
11230 * [2] -- See Intel spec. 27.5.5 "Updating Non-Register State".
11231 */
11232 VMXDispatchHostNmi();
11233 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
11234 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
11235 return VINF_SUCCESS;
11236 }
11237
11238 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
11239 VBOXSTRICTRC rcStrictRc1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
11240 if (RT_UNLIKELY(rcStrictRc1 == VINF_SUCCESS))
11241 { /* likely */ }
11242 else
11243 {
11244 if (rcStrictRc1 == VINF_HM_DOUBLE_FAULT)
11245 rcStrictRc1 = VINF_SUCCESS;
11246 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
11247 return rcStrictRc1;
11248 }
11249
11250 uint32_t uExitIntInfo = pVmxTransient->uExitIntInfo;
11251 uint32_t uVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
11252 switch (uIntType)
11253 {
11254 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT: /* Privileged software exception. (#DB from ICEBP) */
11255 Assert(uVector == X86_XCPT_DB);
11256 RT_FALL_THRU();
11257 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT: /* Software exception. (#BP or #OF) */
11258 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
11259 RT_FALL_THRU();
11260 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11261 {
11262 /*
11263 * If there's any exception caused as a result of event injection, the resulting
11264 * secondary/final exception will be pending; we shall continue guest execution
11265 * after injecting the event. The page-fault case is complicated and we manually
11266 * handle any currently pending event in hmR0VmxExitXcptPF.
11267 */
11268 if (!pVCpu->hm.s.Event.fPending)
11269 { /* likely */ }
11270 else if (uVector != X86_XCPT_PF)
11271 {
11272 rc = VINF_SUCCESS;
11273 break;
11274 }
11275
11276 switch (uVector)
11277 {
11278 case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pVmxTransient); break;
11279 case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pVmxTransient); break;
11280 case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pVmxTransient); break;
11281 case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pVmxTransient); break;
11282 case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pVmxTransient); break;
11283 case X86_XCPT_AC: rc = hmR0VmxExitXcptAC(pVCpu, pVmxTransient); break;
11284
11285 case X86_XCPT_NM: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
11286 rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
11287 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
11288 rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
11289 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
11290 rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
11291 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
11292 rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
11293 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
11294 rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
11295 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
11296 rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
11297 case X86_XCPT_TS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestTS);
11298 rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
11299 default:
11300 {
11301 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
11302 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
11303 {
11304 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
11305 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
11306 Assert(CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx));
11307
11308 rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0);
11309 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11310 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11311 AssertRCReturn(rc, rc);
11312 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(uExitIntInfo),
11313 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode,
11314 0 /* GCPtrFaultAddress */);
11315 }
11316 else
11317 {
11318 AssertMsgFailed(("Unexpected VM-exit caused by exception %#x\n", uVector));
11319 pVCpu->hm.s.u32HMError = uVector;
11320 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
11321 }
11322 break;
11323 }
11324 }
11325 break;
11326 }
11327
11328 default:
11329 {
11330 pVCpu->hm.s.u32HMError = uExitIntInfo;
11331 rc = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
11332 AssertMsgFailed(("Unexpected interruption info %#x\n", VMX_EXIT_INT_INFO_TYPE(uExitIntInfo)));
11333 break;
11334 }
11335 }
11336 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
11337 return rc;
11338}
11339
11340
11341/**
11342 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
11343 */
11344HMVMX_EXIT_NSRC_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11345{
11346 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11347
11348     /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
11349 hmR0VmxClearIntWindowExitVmcs(pVCpu);
11350
11351 /* Evaluate and deliver pending events and resume guest execution. */
11352 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
11353 return VINF_SUCCESS;
11354}
11355
11356
11357/**
11358 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
11359 */
11360HMVMX_EXIT_NSRC_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11361{
11362 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11363 if (RT_UNLIKELY(!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)))
11364 {
11365 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
11366 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11367 }
11368
11369 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS));
11370
11371 /*
11372 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
11373 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
11374 */
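    /* Guest interruptibility-state layout (see Intel spec. 24.4.2 "Guest Non-Register State"):
       bit 0 = blocking by STI, bit 1 = blocking by MOV SS, bit 2 = blocking by SMI,
       bit 3 = blocking by NMI. Only the STI and MOV-SS bits matter below. */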
11375 uint32_t fIntrState = 0;
11376 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
11377 AssertRCReturn(rc, rc);
11378 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
11379 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
11380 {
11381 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
11382 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
11383
11384 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
11385 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState);
11386 AssertRCReturn(rc, rc);
11387 }
11388
11389     /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
11390 hmR0VmxClearNmiWindowExitVmcs(pVCpu);
11391
11392 /* Evaluate and deliver pending events and resume guest execution. */
11393 return VINF_SUCCESS;
11394}
11395
11396
11397/**
11398 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
11399 */
11400HMVMX_EXIT_NSRC_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11401{
11402 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11403 return hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
11404}
11405
11406
11407/**
11408 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
11409 */
11410HMVMX_EXIT_NSRC_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11411{
11412 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11413 return hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
11414}
11415
11416
11417/**
11418 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
11419 */
11420HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11421{
11422 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11423
11424 /*
11425 * Get the state we need and update the exit history entry.
11426 */
11427 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11428 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
11429 AssertRCReturn(rc, rc);
11430
11431 VBOXSTRICTRC rcStrict;
11432 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
11433 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
11434 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
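    /* The exit history is keyed on the flattened linear PC (RIP + CS base); a non-NULL
       record means this PC exits frequently or needs probing and is handed to
       EMHistoryExec below instead of being emulated directly. */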
11435 if (!pExitRec)
11436 {
11437 /*
11438 * Regular CPUID instruction execution.
11439 */
11440 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbInstr);
11441 if (rcStrict == VINF_SUCCESS)
11442 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11443 else if (rcStrict == VINF_IEM_RAISED_XCPT)
11444 {
11445 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
11446 rcStrict = VINF_SUCCESS;
11447 }
11448 }
11449 else
11450 {
11451 /*
11452 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
11453 */
11454 int rc2 = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
11455 AssertRCReturn(rc2, rc2);
11456
11457 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
11458 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
11459
11460 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
11461 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
11462
11463 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
11464 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
11465 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
11466 }
11467 return rcStrict;
11468}
11469
11470
11471/**
11472 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
11473 */
11474HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11475{
11476 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11477 int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR4);
11478 AssertRCReturn(rc, rc);
11479
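    /* GETSEC raises #UD when CR4.SMXE is clear, so the CPU should not deliver this VM-exit
       unless the guest has enabled SMX; anything else is flagged as unexpected below. */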
11480 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
11481 return VINF_EM_RAW_EMULATE_INSTR;
11482
11483 AssertMsgFailed(("hmR0VmxExitGetsec: unexpected VM-exit when CR4.SMXE is 0.\n"));
11484 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11485}
11486
11487
11488/**
11489 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
11490 */
11491HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11492{
11493 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11494 int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
11495 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11496 AssertRCReturn(rc, rc);
11497
11498 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbInstr);
11499 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
11500 {
11501 /* If we get a spurious VM-exit when offsetting is enabled,
11502 we must reset offsetting on VM-reentry. See @bugref{6634}. */
11503 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
11504 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
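        /* The TSC offset only applies while RDTSC does not cause VM-exits (the guest then
           reads host TSC + VMCS TSC offset), which is what makes this exit spurious. */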
11505 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11506 }
11507 else if (rcStrict == VINF_IEM_RAISED_XCPT)
11508 {
11509 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
11510 rcStrict = VINF_SUCCESS;
11511 }
11512 return rcStrict;
11513}
11514
11515
11516/**
11517 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
11518 */
11519HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11520{
11521 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11522 int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
11523 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11524 AssertRCReturn(rc, rc);
11525
11526 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbInstr);
11527 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
11528 {
11529 /* If we get a spurious VM-exit when offsetting is enabled,
11530 we must reset offsetting on VM-reentry. See @bugref{6634}. */
11531 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
11532 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
11533 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11534 }
11535 else if (rcStrict == VINF_IEM_RAISED_XCPT)
11536 {
11537 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
11538 rcStrict = VINF_SUCCESS;
11539 }
11540 return rcStrict;
11541}
11542
11543
11544/**
11545 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
11546 */
11547HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11548{
11549 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11550 int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
11551 AssertRCReturn(rc, rc);
11552
11553 PVM pVM = pVCpu->CTX_SUFF(pVM);
11554 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11555 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
11556 if (RT_LIKELY(rc == VINF_SUCCESS))
11557 {
11558 rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
11559 Assert(pVmxTransient->cbInstr == 2);
11560 }
11561 else
11562 {
11563 AssertMsgFailed(("hmR0VmxExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
11564 rc = VERR_EM_INTERPRETER;
11565 }
11566 return rc;
11567}
11568
11569
11570/**
11571 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
11572 */
11573HMVMX_EXIT_DECL hmR0VmxExitVmcall(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11574{
11575 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11576
11577 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
11578 if (EMAreHypercallInstructionsEnabled(pVCpu))
11579 {
11580 int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
11581 | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
11582 AssertRCReturn(rc, rc);
11583
11584 /* Perform the hypercall. */
11585 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
11586 if (rcStrict == VINF_SUCCESS)
11587 {
11588 rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
11589 AssertRCReturn(rc, rc);
11590 }
11591 else
11592 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
11593 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
11594 || RT_FAILURE(rcStrict));
11595
11596         /* If the hypercall changes anything other than the guest's general-purpose registers,
11597            we would need to reload the changed guest bits here before VM-entry. */
11598 }
11599 else
11600 Log4Func(("Hypercalls not enabled\n"));
11601
11602 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
11603 if (RT_FAILURE(rcStrict))
11604 {
11605 hmR0VmxSetPendingXcptUD(pVCpu);
11606 rcStrict = VINF_SUCCESS;
11607 }
11608
11609 return rcStrict;
11610}
11611
11612
11613/**
11614 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
11615 */
11616HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11617{
11618 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11619 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging || pVCpu->hm.s.fUsingDebugLoop);
11620
11621 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
11622 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11623 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
11624 AssertRCReturn(rc, rc);
11625
11626 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbInstr, pVmxTransient->uExitQual);
11627
11628 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
11629 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11630 else if (rcStrict == VINF_IEM_RAISED_XCPT)
11631 {
11632 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
11633 rcStrict = VINF_SUCCESS;
11634 }
11635 else
11636         AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
11637 VBOXSTRICTRC_VAL(rcStrict)));
11638 return rcStrict;
11639}
11640
11641
11642/**
11643 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
11644 */
11645HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11646{
11647 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11648 int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
11649 AssertRCReturn(rc, rc);
11650
11651 PVM pVM = pVCpu->CTX_SUFF(pVM);
11652 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11653 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pCtx));
11654 if (RT_LIKELY(rc == VINF_SUCCESS))
11655 rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
11656 else
11657 {
11658 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
11659 rc = VERR_EM_INTERPRETER;
11660 }
11661 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
11662 return rc;
11663}
11664
11665
11666/**
11667 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
11668 */
11669HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11670{
11671 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11672 int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
11673 AssertRCReturn(rc, rc);
11674
11675 PVM pVM = pVCpu->CTX_SUFF(pVM);
11676 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11677 VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pCtx));
11678 rc = VBOXSTRICTRC_VAL(rc2);
11679 if (RT_LIKELY( rc == VINF_SUCCESS
11680 || rc == VINF_EM_HALT))
11681 {
11682 int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
11683 AssertRCReturn(rc3, rc3);
11684
11685 if ( rc == VINF_EM_HALT
11686 && EMMonitorWaitShouldContinue(pVCpu, pCtx))
11687 rc = VINF_SUCCESS;
11688 }
11689 else
11690 {
11691 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
11692 rc = VERR_EM_INTERPRETER;
11693 }
11694 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
11695 ("hmR0VmxExitMwait: failed, invalid error code %Rrc\n", rc));
11696 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
11697 return rc;
11698}
11699
11700
11701/**
11702 * VM-exit handler for RSM (VMX_EXIT_RSM). Unconditional VM-exit.
11703 */
11704HMVMX_EXIT_NSRC_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11705{
11706 /*
11707 * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root
11708 * mode. In theory, we should never get this VM-exit. This can happen only if dual-monitor
11709 * treatment of SMI and VMX is enabled, which can (only?) be done by executing VMCALL in
11710 * VMX root operation. If we get here, something funny is going on.
11711 *
11712 * See Intel spec. 33.15.5 "Enabling the Dual-Monitor Treatment".
11713 */
11714 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11715 AssertMsgFailed(("Unexpected RSM VM-exit\n"));
11716 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11717}
11718
11719
11720/**
11721 * VM-exit handler for SMI (VMX_EXIT_SMI). Unconditional VM-exit.
11722 */
11723HMVMX_EXIT_NSRC_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11724{
11725 /*
11726 * This can only happen if we support dual-monitor treatment of SMI, which can be activated
11727 * by executing VMCALL in VMX root operation. Only an STM (SMM transfer monitor) would get
11728 * this VM-exit when we (the executive monitor) execute a VMCALL in VMX root mode or receive
11729 * an SMI. If we get here, something funny is going on.
11730 *
11731 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
11732 * See Intel spec. 25.3 "Other Causes of VM-Exits"
11733 */
11734 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11735 AssertMsgFailed(("Unexpected SMI VM-exit\n"));
11736 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11737}
11738
11739
11740/**
11741 * VM-exit handler for IO SMI (VMX_EXIT_IO_SMI). Unconditional VM-exit.
11742 */
11743HMVMX_EXIT_NSRC_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11744{
11745 /* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */
11746 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11747 AssertMsgFailed(("Unexpected IO SMI VM-exit\n"));
11748 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11749}
11750
11751
11752/**
11753 * VM-exit handler for SIPI (VMX_EXIT_SIPI). Conditional VM-exit.
11754 */
11755HMVMX_EXIT_NSRC_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11756{
11757 /*
11758 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used.
11759 * We don't make use of it as our guests don't have direct access to the host LAPIC.
11760 * See Intel spec. 25.3 "Other Causes of VM-exits".
11761 */
11762 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11763 AssertMsgFailed(("Unexpected SIPI VM-exit\n"));
11764 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11765}
11766
11767
11768/**
11769 * VM-exit handler for INIT signal (VMX_EXIT_INIT_SIGNAL). Unconditional
11770 * VM-exit.
11771 */
11772HMVMX_EXIT_NSRC_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11773{
11774 /*
11775 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
11776      * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery" and Intel spec. 29.3 "VMX Instructions" for "VMXON".
11777 *
11778 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these VM-exits.
11779 * See Intel spec. "23.8 Restrictions on VMX operation".
11780 */
11781 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11782 return VINF_SUCCESS;
11783}
11784
11785
11786/**
11787 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
11788 * VM-exit.
11789 */
11790HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11791{
11792 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11793 return VINF_EM_RESET;
11794}
11795
11796
11797/**
11798 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
11799 */
11800HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11801{
11802 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11803 Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_HLT_EXIT);
11804
11805 int rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
11806 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RFLAGS);
11807 AssertRCReturn(rc, rc);
11808
11809 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
11810 rc = VINF_SUCCESS;
11811 else
11812 rc = VINF_EM_HALT;
11813
11814 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
11815 if (rc != VINF_SUCCESS)
11816 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3);
11817 return rc;
11818}
11819
11820
11821/**
11822 * VM-exit handler for instructions that result in a \#UD exception delivered to
11823 * the guest.
11824 */
11825HMVMX_EXIT_NSRC_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11826{
11827 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11828 hmR0VmxSetPendingXcptUD(pVCpu);
11829 return VINF_SUCCESS;
11830}
11831
11832
11833/**
11834 * VM-exit handler for expiry of the VMX preemption timer.
11835 */
11836HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11837{
11838 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11839
11840 /* If the preemption-timer has expired, reinitialize the preemption timer on next VM-entry. */
11841 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
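    /* The VMX-preemption timer counts down at the TSC rate shifted right by IA32_VMX_MISC[4:0],
       which is why it is recomputed together with the TSC offset on the next VM-entry. */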
11842
11843 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
11844 PVM pVM = pVCpu->CTX_SUFF(pVM);
11845 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
11846 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
11847 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
11848}
11849
11850
11851/**
11852 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
11853 */
11854HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11855{
11856 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11857
11858 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11859 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
11860 AssertRCReturn(rc, rc);
11861
11862 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbInstr);
11863 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
11864 : HM_CHANGED_RAISED_XCPT_MASK);
11865
11866 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
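    /* Only swap XCR0 around VM-entry/exit when the guest actually uses XSAVE (CR4.OSXSAVE set)
       and its XCR0 differs from the host value currently loaded on this CPU. */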
11867 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
11868
11869 return rcStrict;
11870}
11871
11872
11873/**
11874 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
11875 */
11876HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11877{
11878 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11879 /** @todo Use VM-exit instruction information. */
11880 return VERR_EM_INTERPRETER;
11881}
11882
11883
11884/**
11885 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE).
11886 * Error VM-exit.
11887 */
11888HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11889{
11890 int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
11891 AssertRCReturn(rc, rc);
11892 rc = hmR0VmxCheckVmcsCtls(pVCpu);
11893 if (RT_FAILURE(rc))
11894 return rc;
11895
11896 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu);
11897 NOREF(uInvalidReason);
11898
11899#ifdef VBOX_STRICT
11900 uint32_t fIntrState;
11901 RTHCUINTREG uHCReg;
11902 uint64_t u64Val;
11903 uint32_t u32Val;
11904
11905 rc = hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
11906 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
11907 rc |= hmR0VmxReadEntryInstrLenVmcs(pVmxTransient);
11908 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
11909 AssertRCReturn(rc, rc);
11910
11911 Log4(("uInvalidReason %u\n", uInvalidReason));
11912 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
11913 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
11914 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
11915 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
11916
11917 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); AssertRC(rc);
11918 Log4(("VMX_VMCS_GUEST_CR0 %#RX32\n", u32Val));
11919 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
11920 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
11921 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
11922     Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RHr\n", uHCReg));
11923 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
11924 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
11925 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
11926 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
11927 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
11928 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
11929
11930 hmR0DumpRegs(pVCpu);
11931#else
11932 NOREF(pVmxTransient);
11933#endif
11934
11935 return VERR_VMX_INVALID_GUEST_STATE;
11936}
11937
11938
11939/**
11940 * VM-exit handler for VM-entry failure due to an MSR-load
11941 * (VMX_EXIT_ERR_MSR_LOAD). Error VM-exit.
11942 */
11943HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11944{
11945 AssertMsgFailed(("Unexpected MSR-load exit\n"));
11946 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11947}
11948
11949
11950/**
11951 * VM-exit handler for VM-entry failure due to a machine-check event
11952 * (VMX_EXIT_ERR_MACHINE_CHECK). Error VM-exit.
11953 */
11954HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11955{
11956 AssertMsgFailed(("Unexpected machine-check event exit\n"));
11957 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11958}
11959
11960
11961/**
11962  * VM-exit handler for all undefined reasons. Should never ever happen... in
11963 * theory.
11964 */
11965HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11966{
11967 RT_NOREF2(pVCpu, pVmxTransient);
11968 AssertMsgFailed(("Huh!? Undefined VM-exit reason %d\n", pVmxTransient->uExitReason));
11969 return VERR_VMX_UNDEFINED_EXIT_CODE;
11970}
11971
11972
11973/**
11974 * VM-exit handler for XDTR (LGDT, SGDT, LIDT, SIDT) accesses
11975 * (VMX_EXIT_GDTR_IDTR_ACCESS) and LDT and TR access (LLDT, LTR, SLDT, STR).
11976 * Conditional VM-exit.
11977 */
11978HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11979{
11980 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11981
11982     /* By default, we don't enable VMX_PROC_CTLS2_DESC_TABLE_EXIT. */
11983 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess);
11984 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 & VMX_PROC_CTLS2_DESC_TABLE_EXIT)
11985 return VERR_EM_INTERPRETER;
11986 AssertMsgFailed(("Unexpected XDTR access\n"));
11987 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11988}
11989
11990
11991/**
11992 * VM-exit handler for RDRAND (VMX_EXIT_RDRAND). Conditional VM-exit.
11993 */
11994HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
11995{
11996 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11997
11998 /* By default, we don't enable VMX_PROC_CTLS2_RDRAND_EXIT. */
11999 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 & VMX_PROC_CTLS2_RDRAND_EXIT)
12000 return VERR_EM_INTERPRETER;
12001 AssertMsgFailed(("Unexpected RDRAND exit\n"));
12002 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
12003}
12004
12005
12006/**
12007 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
12008 */
12009HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
12010{
12011 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12012
12013     /** @todo Optimize this: We currently drag in the whole MSR state
12014      * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get the
12015      * MSRs required. That would require changes to IEM and possibly CPUM too.
12016      * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
12017 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
12018 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12019 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
12020 switch (idMsr)
12021 {
12022 /* The FS and GS base MSRs are not part of the above all-MSRs mask. */
12023 case MSR_K8_FS_BASE: rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_FS); break;
12024 case MSR_K8_GS_BASE: rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_GS); break;
12025 }
12026 AssertRCReturn(rc, rc);
12027
12028 Log4Func(("ecx=%#RX32\n", idMsr));
12029
12030#ifdef VBOX_STRICT
12031 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
12032 {
12033 if ( hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, idMsr)
12034 && idMsr != MSR_K6_EFER)
12035 {
12036 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
12037 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
12038 }
12039 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
12040 {
12041 VMXMSREXITREAD enmRead;
12042 VMXMSREXITWRITE enmWrite;
12043 int rc2 = HMGetVmxMsrPermission(pVCpu->hm.s.vmx.pvMsrBitmap, idMsr, &enmRead, &enmWrite);
12044 AssertRCReturn(rc2, rc2);
12045 if (enmRead == VMXMSREXIT_PASSTHRU_READ)
12046 {
12047 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
12048 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
12049 }
12050 }
12051 }
12052#endif
12053
12054 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbInstr);
12055 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
12056 if (rcStrict == VINF_SUCCESS)
12057 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
12058 | HM_CHANGED_GUEST_RAX | HM_CHANGED_GUEST_RDX);
12059 else if (rcStrict == VINF_IEM_RAISED_XCPT)
12060 {
12061 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
12062 rcStrict = VINF_SUCCESS;
12063 }
12064 else
12065 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ, ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
12066
12067 return rcStrict;
12068}
12069
12070
12071/**
12072 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
12073 */
12074HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
12075{
12076 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12077
12078     /** @todo Optimize this: We currently drag in the whole MSR state
12079      * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get the
12080      * MSRs required. That would require changes to IEM and possibly CPUM too.
12081      * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
12082 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
12083 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12084 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
12085 | CPUMCTX_EXTRN_ALL_MSRS);
12086 switch (idMsr)
12087 {
12088 /*
12089 * The FS and GS base MSRs are not part of the above all-MSRs mask.
12090 *
12091      * Although we don't need to fetch the base as it will be overwritten shortly, when
12092      * loading the guest state we would also load the entire segment register, including
12093      * the limit and attributes, and thus we need to load them here.
12094 */
12095 case MSR_K8_FS_BASE: rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_FS); break;
12096 case MSR_K8_GS_BASE: rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_GS); break;
12097 }
12098 AssertRCReturn(rc, rc);
12099
12100 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
12101
12102 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbInstr);
12103 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
12104
12105 if (rcStrict == VINF_SUCCESS)
12106 {
12107 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
12108
12109 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
12110 if ( idMsr == MSR_IA32_APICBASE
12111 || ( idMsr >= MSR_IA32_X2APIC_START
12112 && idMsr <= MSR_IA32_X2APIC_END))
12113 {
12114 /*
12115 * We've already saved the APIC related guest-state (TPR) in hmR0VmxPostRunGuest(). When full APIC register
12116 * virtualization is implemented we'll have to make sure APIC state is saved from the VMCS before IEM changes it.
12117 */
12118 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
12119 }
12120 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
12121 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
12122 else if (idMsr == MSR_K6_EFER)
12123 {
12124 /*
12125 * If the guest touches EFER we need to update the VM-Entry and VM-Exit controls as well,
12126 * even if it is -not- touching bits that cause paging mode changes (LMA/LME). We care about
12127 * the other bits as well, SCE and NXE. See @bugref{7368}.
12128 */
12129 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_CTLS
12130 | HM_CHANGED_VMX_EXIT_CTLS);
12131 }
12132
12133 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not supported. */
12134 if (!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
12135 {
12136 switch (idMsr)
12137 {
12138 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
12139 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
12140 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
12141 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS); break;
12142 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_GS); break;
12143 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
12144 default:
12145 {
12146 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, idMsr))
12147 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
12148 else if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
12149 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
12150 break;
12151 }
12152 }
12153 }
12154#ifdef VBOX_STRICT
12155 else
12156 {
12157 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
12158 switch (idMsr)
12159 {
12160 case MSR_IA32_SYSENTER_CS:
12161 case MSR_IA32_SYSENTER_EIP:
12162 case MSR_IA32_SYSENTER_ESP:
12163 case MSR_K8_FS_BASE:
12164 case MSR_K8_GS_BASE:
12165 {
12166 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
12167 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
12168 }
12169
12170 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
12171 default:
12172 {
12173 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, idMsr))
12174 {
12175 /* EFER writes are always intercepted, see hmR0VmxExportGuestMsrs(). */
12176 if (idMsr != MSR_K6_EFER)
12177 {
12178 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
12179 idMsr));
12180 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
12181 }
12182 }
12183
12184 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
12185 {
12186 VMXMSREXITREAD enmRead;
12187 VMXMSREXITWRITE enmWrite;
12188 int rc2 = HMGetVmxMsrPermission(pVCpu->hm.s.vmx.pvMsrBitmap, idMsr, &enmRead, &enmWrite);
12189 AssertRCReturn(rc2, rc2);
12190 if (enmWrite == VMXMSREXIT_PASSTHRU_WRITE)
12191 {
12192 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
12193 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
12194 }
12195 }
12196 break;
12197 }
12198 }
12199 }
12200#endif /* VBOX_STRICT */
12201 }
12202 else if (rcStrict == VINF_IEM_RAISED_XCPT)
12203 {
12204 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
12205 rcStrict = VINF_SUCCESS;
12206 }
12207 else
12208 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE, ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
12209
12210 return rcStrict;
12211}
12212
12213
12214/**
12215 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
12216 */
12217HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
12218{
12219 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12220 /** @todo The guest has likely hit a contended spinlock. We might want to
12221      * poke or schedule a different guest VCPU. */
12222 return VINF_EM_RAW_INTERRUPT;
12223}
12224
12225
12226/**
12227 * VM-exit handler for when the TPR value is lowered below the specified
12228 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
12229 */
12230HMVMX_EXIT_NSRC_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
12231{
12232 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12233 Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
12234
12235 /*
12236 * The TPR shadow would've been synced with the APIC TPR in hmR0VmxPostRunGuest(). We'll re-evaluate
12237 * pending interrupts and inject them before the next VM-entry so we can just continue execution here.
12238 */
12239 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
12240 return VINF_SUCCESS;
12241}
12242
12243
12244/**
12245 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
12246 * VM-exit.
12247 *
12248 * @retval VINF_SUCCESS when guest execution can continue.
12249 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
12250 * @retval VERR_EM_INTERPRETER when something unexpected happened, fallback to
12251 * interpreter.
12252 */
12253HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
12254{
12255 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12256 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
12257
12258 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
12259 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12260 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
12261 AssertRCReturn(rc, rc);
12262
12263 VBOXSTRICTRC rcStrict;
12264 PVM pVM = pVCpu->CTX_SUFF(pVM);
12265 RTGCUINTPTR const uExitQual = pVmxTransient->uExitQual;
12266 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
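    /* Access types in the exit qualification: 0 = MOV to CRx, 1 = MOV from CRx, 2 = CLTS,
       3 = LMSW; these map onto the cases handled below. */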
12267 switch (uAccessType)
12268 {
12269 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE: /* MOV to CRx */
12270 {
12271 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
12272 rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_REGISTER(uExitQual),
12273 VMX_EXIT_QUAL_CRX_GENREG(uExitQual));
12274 AssertMsg( rcStrict == VINF_SUCCESS
12275 || rcStrict == VINF_IEM_RAISED_XCPT
12276 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
12277
12278 switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQual))
12279 {
12280 case 0:
12281 {
12282 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
12283 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
12284 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
12285 Log4Func(("CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
12286
12287 /*
12288 * This is a kludge for handling switches back to real mode when we try to use
12289 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
12290 * deal with special selector values, so we have to return to ring-3 and run
12291 * there till the selector values are V86 mode compatible.
12292 *
12293 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
12294              * latter is an alias for VINF_IEM_RAISED_XCPT which is converted to VINF_SUCCESS
12295 * at the end of this function.
12296 */
12297 if ( rc == VINF_SUCCESS
12298 && !pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest
12299 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
12300 && (uOldCr0 & X86_CR0_PE)
12301 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
12302 {
12303 /** @todo check selectors rather than returning all the time. */
12304 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
12305 rcStrict = VINF_EM_RESCHEDULE_REM;
12306 }
12307 break;
12308 }
12309
12310 case 2:
12311 {
12312 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Write);
12313                 /* Nothing to do here; CR2 is not part of the VMCS. */
12314 break;
12315 }
12316
12317 case 3:
12318 {
12319 Assert( !pVM->hm.s.fNestedPaging
12320 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
12321 || pVCpu->hm.s.fUsingDebugLoop);
12322 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Write);
12323 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
12324 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
12325 Log4Func(("CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
12326 break;
12327 }
12328
12329 case 4:
12330 {
12331 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Write);
12332 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
12333 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
12334 Log4Func(("CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
12335 pVCpu->cpum.GstCtx.cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
12336 break;
12337 }
12338
12339 case 8:
12340 {
12341 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Write);
12342 Assert(!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
12343 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
12344 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
12345 break;
12346 }
12347 default:
12348 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQual)));
12349 break;
12350 }
12351 break;
12352 }
12353
12354 case VMX_EXIT_QUAL_CRX_ACCESS_READ: /* MOV from CRx */
12355 {
12356 Assert( !pVM->hm.s.fNestedPaging
12357 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
12358 || pVCpu->hm.s.fUsingDebugLoop
12359 || VMX_EXIT_QUAL_CRX_REGISTER(uExitQual) != 3);
12360 /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
12361 Assert( VMX_EXIT_QUAL_CRX_REGISTER(uExitQual) != 8
12362 || !(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
12363
12364 rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_GENREG(uExitQual),
12365 VMX_EXIT_QUAL_CRX_REGISTER(uExitQual));
12366 AssertMsg( rcStrict == VINF_SUCCESS
12367 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
12368#ifdef VBOX_WITH_STATISTICS
12369 switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQual))
12370 {
12371 case 0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break;
12372 case 2: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Read); break;
12373 case 3: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Read); break;
12374 case 4: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Read); break;
12375 case 8: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Read); break;
12376 }
12377#endif
12378 Log4Func(("CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQual),
12379 VBOXSTRICTRC_VAL(rcStrict)));
12380 if (VMX_EXIT_QUAL_CRX_GENREG(uExitQual) == X86_GREG_xSP)
12381 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
12382 else
12383 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
12384 break;
12385 }
12386
12387 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */
12388 {
12389 rcStrict = IEMExecDecodedClts(pVCpu, pVmxTransient->cbInstr);
12390 AssertMsg( rcStrict == VINF_SUCCESS
12391 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
12392
12393 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
12394 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
12395 Log4Func(("CLTS rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
12396 break;
12397 }
12398
12399 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
12400 {
12401 /* Note! LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here. */
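            /* LMSW writes only CR0 bits 3:0 (PE, MP, EM, TS) and can set but never clear PE,
               so a switch back to real mode cannot happen through this path. */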
12402 rc = hmR0VmxReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
12403 AssertRCReturn(rc, rc);
12404 rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual),
12405 pVmxTransient->uGuestLinearAddr);
12406 AssertMsg( rcStrict == VINF_SUCCESS
12407 || rcStrict == VINF_IEM_RAISED_XCPT
12408 , ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
12409
12410 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
12411 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
12412 Log4Func(("LMSW rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
12413 break;
12414 }
12415
12416 default:
12417 AssertMsgFailedReturn(("Invalid access-type in Mov CRx VM-exit qualification %#x\n", uAccessType),
12418 VERR_VMX_UNEXPECTED_EXCEPTION);
12419 }
12420
12421 Assert( (pVCpu->hm.s.fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
12422 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
12423 if (rcStrict == VINF_IEM_RAISED_XCPT)
12424 {
12425 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
12426 rcStrict = VINF_SUCCESS;
12427 }
12428
12429 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
12430 NOREF(pVM);
12431 return rcStrict;
12432}
12433
12434
12435/**
12436 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
12437 * VM-exit.
12438 */
12439HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
12440{
12441 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12442 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
12443
12444 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
12445 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
12446 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12447 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER);
12448     /* EFER is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
12449 AssertRCReturn(rc, rc);
12450
12451     /* Refer to Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
12452 uint32_t uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
12453 uint8_t uIOWidth = VMX_EXIT_QUAL_IO_WIDTH(pVmxTransient->uExitQual);
12454 bool fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
12455 bool fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
12456 bool fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
12457 bool fDbgStepping = pVCpu->hm.s.fSingleInstruction;
12458 AssertReturn(uIOWidth <= 3 && uIOWidth != 2, VERR_VMX_IPE_1);
12459
12460 /*
12461 * Update exit history to see if this exit can be optimized.
12462 */
12463 VBOXSTRICTRC rcStrict;
12464 PCEMEXITREC pExitRec = NULL;
12465 if ( !fGstStepping
12466 && !fDbgStepping)
12467 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
12468 !fIOString
12469 ? !fIOWrite
12470 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
12471 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
12472 : !fIOWrite
12473 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
12474 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
12475 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
12476 if (!pExitRec)
12477 {
12478 /* I/O operation lookup arrays. */
12479 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses. */
12480 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
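        /* Example: a 16-bit access has uIOWidth 1, giving cbValue 2 and an AND mask of 0xffff
           so only AX is touched on IN; uIOWidth 2 is invalid and is asserted against above. */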
12481 uint32_t const cbValue = s_aIOSizes[uIOWidth];
12482 uint32_t const cbInstr = pVmxTransient->cbInstr;
12483 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
12484 PVM pVM = pVCpu->CTX_SUFF(pVM);
12485 if (fIOString)
12486 {
12487 /*
12488 * INS/OUTS - I/O String instruction.
12489 *
12490 * Use instruction-information if available, otherwise fall back on
12491 * interpreting the instruction.
12492 */
12493 Log4Func(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
12494 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
12495 bool const fInsOutsInfo = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
12496 if (fInsOutsInfo)
12497 {
12498 int rc2 = hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
12499 AssertRCReturn(rc2, rc2);
12500 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
12501 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
12502 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
12503 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
12504 if (fIOWrite)
12505 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
12506 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
12507 else
12508 {
12509 /*
12510 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
12511                  * Hence the "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
12512 * See Intel Instruction spec. for "INS".
12513 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
12514 */
12515 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
12516 }
12517 }
12518 else
12519 rcStrict = IEMExecOne(pVCpu);
12520
12521 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
12522 fUpdateRipAlready = true;
12523 }
12524 else
12525 {
12526 /*
12527 * IN/OUT - I/O instruction.
12528 */
12529 Log4Func(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
12530 uint32_t const uAndVal = s_aIOOpAnd[uIOWidth];
12531 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
12532 if (fIOWrite)
12533 {
12534 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
12535 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
12536 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
12537 && !pCtx->eflags.Bits.u1TF)
12538 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
12539 }
12540 else
12541 {
12542 uint32_t u32Result = 0;
12543 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
12544 if (IOM_SUCCESS(rcStrict))
12545 {
12546 /* Save result of I/O IN instr. in AL/AX/EAX. */
12547 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
12548 }
12549 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
12550 && !pCtx->eflags.Bits.u1TF)
12551 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
12552 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
12553 }
12554 }
12555
12556 if (IOM_SUCCESS(rcStrict))
12557 {
12558 if (!fUpdateRipAlready)
12559 {
12560 hmR0VmxAdvanceGuestRipBy(pVCpu, cbInstr);
12561 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
12562 }
12563
12564 /*
12565              * INS/OUTS with a REP prefix updates RFLAGS, which can be observed as a triple-fault guru
12566              * meditation while booting a Fedora 17 64-bit guest.
12567 *
12568 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
12569 */
12570 if (fIOString)
12571 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
12572
12573 /*
12574 * If any I/O breakpoints are armed, we need to check if one triggered
12575 * and take appropriate action.
12576 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
12577 */
12578 rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_DR7);
12579 AssertRCReturn(rc, rc);
12580
12581 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
12582 * execution engines about whether hyper BPs and such are pending. */
12583 uint32_t const uDr7 = pCtx->dr[7];
12584 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
12585 && X86_DR7_ANY_RW_IO(uDr7)
12586 && (pCtx->cr4 & X86_CR4_DE))
12587 || DBGFBpIsHwIoArmed(pVM)))
12588 {
12589 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
12590
12591 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
12592 VMMRZCallRing3Disable(pVCpu);
12593 HM_DISABLE_PREEMPT(pVCpu);
12594
12595 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
12596
12597 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
12598 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
12599 {
12600 /* Raise #DB. */
12601 if (fIsGuestDbgActive)
12602 ASMSetDR6(pCtx->dr[6]);
12603 if (pCtx->dr[7] != uDr7)
12604 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_DR7;
12605
12606 hmR0VmxSetPendingXcptDB(pVCpu);
12607 }
12608             /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST];
12609                however, we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
12610 else if ( rcStrict2 != VINF_SUCCESS
12611 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
12612 rcStrict = rcStrict2;
12613 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
12614
12615 HM_RESTORE_PREEMPT();
12616 VMMRZCallRing3Enable(pVCpu);
12617 }
12618 }
12619
12620#ifdef VBOX_STRICT
12621 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
12622 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
12623 Assert(!fIOWrite);
12624 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
12625 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
12626 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
12627 Assert(fIOWrite);
12628 else
12629 {
12630# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
12631 * statuses, that the VMM device and some others may return. See
12632 * IOM_SUCCESS() for guidance. */
12633 AssertMsg( RT_FAILURE(rcStrict)
12634 || rcStrict == VINF_SUCCESS
12635 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
12636 || rcStrict == VINF_EM_DBG_BREAKPOINT
12637 || rcStrict == VINF_EM_RAW_GUEST_TRAP
12638 || rcStrict == VINF_EM_RAW_TO_R3
12639 || rcStrict == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
12640# endif
12641 }
12642#endif
12643 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
12644 }
12645 else
12646 {
12647 /*
12648 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
12649 */
12650 int rc2 = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
12651 AssertRCReturn(rc2, rc2);
12652 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &pVCpu->hm.s.StatExitIOWrite : &pVCpu->hm.s.StatExitIORead
12653 : fIOWrite ? &pVCpu->hm.s.StatExitIOStringWrite : &pVCpu->hm.s.StatExitIOStringRead);
12654 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
12655 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
12656 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
12657 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOWidth));
12658
12659 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
12660 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
12661
12662 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
12663 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
12664 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
12665 }
12666 return rcStrict;
12667}
12668
12669
12670/**
12671 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
12672 * VM-exit.
12673 */
12674HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
12675{
12676 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12677
12678     /* Check if this task-switch occurred while delivering an event through the guest IDT. */
12679 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
12680 AssertRCReturn(rc, rc);
12681 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
12682 {
12683 rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
12684 AssertRCReturn(rc, rc);
12685 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
12686 {
12687 uint32_t uErrCode;
12688 RTGCUINTPTR GCPtrFaultAddress;
12689 uint32_t const uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
12690 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
12691 bool const fErrorCodeValid = VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo);
12692 if (fErrorCodeValid)
12693 {
12694 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
12695 AssertRCReturn(rc, rc);
12696 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
12697 }
12698 else
12699 uErrCode = 0;
12700
12701 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
12702 && uVector == X86_XCPT_PF)
12703 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
12704 else
12705 GCPtrFaultAddress = 0;
12706
12707 rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12708 AssertRCReturn(rc, rc);
12709
12710 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
12711 pVmxTransient->cbInstr, uErrCode, GCPtrFaultAddress);
12712
12713 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", uIntType, uVector));
12714 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
12715 return VINF_EM_RAW_INJECT_TRPM_EVENT;
12716 }
12717 }
12718
12719 /* Fall back to the interpreter to emulate the task-switch. */
12720 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
12721 return VERR_EM_INTERPRETER;
12722}
12723
12724
12725/**
12726 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
12727 */
12728HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
12729{
12730 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12731 Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG);
12732 pVCpu->hm.s.vmx.Ctls.u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
12733 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);
12734 AssertRCReturn(rc, rc);
12735 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
12736 return VINF_EM_DBG_STEPPED;
12737}
12738
12739
12740/**
12741 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
12742 */
12743HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
12744{
12745 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12746
12747 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
12748
12749 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
12750 VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
12751 if (RT_LIKELY(rcStrict1 == VINF_SUCCESS))
12752 {
12753 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
12754 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
12755 {
12756 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingInterpret);
12757 return VINF_EM_RAW_INJECT_TRPM_EVENT;
12758 }
12759 }
12760 else
12761 {
12762 if (rcStrict1 == VINF_HM_DOUBLE_FAULT)
12763 rcStrict1 = VINF_SUCCESS;
12764 return rcStrict1;
12765 }
12766
12767 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
12768 int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
12769 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
12770 AssertRCReturn(rc, rc);
12771
12772 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
12773 uint32_t uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
12774 VBOXSTRICTRC rcStrict2;
12775 switch (uAccessType)
12776 {
12777 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
12778 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
12779 {
12780 AssertMsg( !(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
12781 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
12782 ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
12783
12784 RTGCPHYS GCPhys = pVCpu->hm.s.vmx.u64MsrApicBase; /* Always up-to-date, u64MsrApicBase is not part of the VMCS. */
12785 GCPhys &= PAGE_BASE_GC_MASK;
12786 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
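 /* Illustrative example (assumption, not from the original source): with the guest APIC
    mapped at the default base 0xfee00000 and an access at offset 0x80 (XAPIC_OFF_TPR),
    the computed GCPhys is 0xfee00080, i.e. the physical address of the TPR register. */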
12787 PVM pVM = pVCpu->CTX_SUFF(pVM);
12788 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
12789 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
12790
12791 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
12792 rcStrict2 = IOMMMIOPhysHandler(pVM, pVCpu,
12793 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW,
12794 CPUMCTX2CORE(pCtx), GCPhys);
12795 Log4Func(("IOMMMIOPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)));
12796 if ( rcStrict2 == VINF_SUCCESS
12797 || rcStrict2 == VERR_PAGE_TABLE_NOT_PRESENT
12798 || rcStrict2 == VERR_PAGE_NOT_PRESENT)
12799 {
12800 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
12801 | HM_CHANGED_GUEST_APIC_TPR);
12802 rcStrict2 = VINF_SUCCESS;
12803 }
12804 break;
12805 }
12806
12807 default:
12808 Log4Func(("uAccessType=%#x\n", uAccessType));
12809 rcStrict2 = VINF_EM_RAW_EMULATE_INSTR;
12810 break;
12811 }
12812
12813 if (rcStrict2 != VINF_SUCCESS)
12814 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchApicAccessToR3);
12815 return rcStrict2;
12816}
12817
12818
12819/**
12820 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
12821 * VM-exit.
12822 */
12823HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
12824{
12825 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12826
12827 /* We should -not- get this VM-exit if the guest's debug registers were active. */
12828 if (pVmxTransient->fWasGuestDebugStateActive)
12829 {
12830 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
12831 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
12832 }
12833
12834 if ( !pVCpu->hm.s.fSingleInstruction
12835 && !pVmxTransient->fWasHyperDebugStateActive)
12836 {
12837 Assert(!DBGFIsStepping(pVCpu));
12838 Assert(pVCpu->hm.s.vmx.Ctls.u32XcptBitmap & RT_BIT_32(X86_XCPT_DB));
12839
12840 /* Don't intercept MOV DRx any more. */
12841 pVCpu->hm.s.vmx.Ctls.u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
12842 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);
12843 AssertRCReturn(rc, rc);
12844
12845 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
12846 VMMRZCallRing3Disable(pVCpu);
12847 HM_DISABLE_PREEMPT(pVCpu);
12848
12849 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
12850 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
12851 Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
12852
12853 HM_RESTORE_PREEMPT();
12854 VMMRZCallRing3Enable(pVCpu);
12855
12856#ifdef VBOX_WITH_STATISTICS
12857 rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
12858 AssertRCReturn(rc, rc);
12859 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
12860 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
12861 else
12862 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
12863#endif
12864 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
12865 return VINF_SUCCESS;
12866 }
12867
12868 /*
12869 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER, CS. EFER is always up-to-date.
12870 * Update the segment registers and DR7 from the CPU.
12871 */
12872 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
12873 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
12874 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
12875 AssertRCReturn(rc, rc);
12876 Log4Func(("CS:RIP=%04x:%08RX64\n", pCtx->cs.Sel, pCtx->rip));
12877
12878 PVM pVM = pVCpu->CTX_SUFF(pVM);
12879 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
12880 {
12881 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
12882 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
12883 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
12884 if (RT_SUCCESS(rc))
12885 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
12886 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
12887 }
12888 else
12889 {
12890 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
12891 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
12892 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
12893 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
12894 }
12895
12896 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
12897 if (RT_SUCCESS(rc))
12898 {
12899 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
12900 AssertRCReturn(rc2, rc2);
12901 return VINF_SUCCESS;
12902 }
12903 return rc;
12904}
12905
12906
12907/**
12908 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
12909 * Conditional VM-exit.
12910 */
12911HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
12912{
12913 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12914 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
12915
12916 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
12917 VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
12918 if (RT_LIKELY(rcStrict1 == VINF_SUCCESS))
12919 {
12920 /* If event delivery causes an EPT misconfig (MMIO), go back to instruction emulation as otherwise
12921 injecting the original pending event would most likely cause the same EPT misconfig VM-exit. */
12922 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
12923 {
12924 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingInterpret);
12925 return VINF_EM_RAW_INJECT_TRPM_EVENT;
12926 }
12927 }
12928 else
12929 {
12930 if (rcStrict1 == VINF_HM_DOUBLE_FAULT)
12931 rcStrict1 = VINF_SUCCESS;
12932 return rcStrict1;
12933 }
12934
12935 /*
12936 * Get sufficient state and update the exit history entry.
12937 */
12938 RTGCPHYS GCPhys;
12939 int rc = VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &GCPhys);
12940 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
12941 AssertRCReturn(rc, rc);
12942
12943 VBOXSTRICTRC rcStrict;
12944 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
12945 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
12946 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
12947 if (!pExitRec)
12948 {
12949 /*
12950 * If we succeed, resume guest execution.
12951 * If we fail in interpreting the instruction because we couldn't get the guest physical address
12952 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
12953 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
12954 * weird case. See @bugref{6043}.
12955 */
12956 PVM pVM = pVCpu->CTX_SUFF(pVM);
12957 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
12958 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
12959 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
12960 if ( rcStrict == VINF_SUCCESS
12961 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
12962 || rcStrict == VERR_PAGE_NOT_PRESENT)
12963 {
12964 /* Successfully handled MMIO operation. */
12965 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
12966 | HM_CHANGED_GUEST_APIC_TPR);
12967 rcStrict = VINF_SUCCESS;
12968 }
12969 }
12970 else
12971 {
12972 /*
12973 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
12974 */
12975 int rc2 = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
12976 AssertRCReturn(rc2, rc2);
12977
12978 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
12979 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
12980
12981 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
12982 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
12983
12984 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
12985 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
12986 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
12987 }
12988 return VBOXSTRICTRC_TODO(rcStrict);
12989}
12990
12991
12992/**
12993 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
12994 * VM-exit.
12995 */
12996HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
12997{
12998 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12999 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
13000
13001 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
13002 VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
13003 if (RT_LIKELY(rcStrict1 == VINF_SUCCESS))
13004 {
13005 /* In the unlikely case that the EPT violation happened as a result of delivering an event, log it. */
13006 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
13007 Log4Func(("EPT violation with an event pending u64IntInfo=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo));
13008 }
13009 else
13010 {
13011 if (rcStrict1 == VINF_HM_DOUBLE_FAULT)
13012 rcStrict1 = VINF_SUCCESS;
13013 return rcStrict1;
13014 }
13015
13016 RTGCPHYS GCPhys;
13017 int rc = VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &GCPhys);
13018 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
13019 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
13020 AssertRCReturn(rc, rc);
13021
13022 /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
13023 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQual));
13024
13025 RTGCUINT uErrorCode = 0;
13026 if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_INSTR_FETCH)
13027 uErrorCode |= X86_TRAP_PF_ID;
13028 if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_DATA_WRITE)
13029 uErrorCode |= X86_TRAP_PF_RW;
13030 if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ENTRY_PRESENT)
13031 uErrorCode |= X86_TRAP_PF_P;
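 /* Illustrative example (assumption, not from the original source): a guest data write to a
    page whose EPT entry is not present sets only the data-write bit in the qualification,
    so the synthesized error code is X86_TRAP_PF_RW with the P and ID bits clear. */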
13032
13033 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
13034
13035
13036 /* Handle the pagefault trap for the nested shadow table. */
13037 PVM pVM = pVCpu->CTX_SUFF(pVM);
13038 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
13039
13040 Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQual, GCPhys, uErrorCode,
13041 pCtx->cs.Sel, pCtx->rip));
13042
13043 VBOXSTRICTRC rcStrict2 = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
13044 TRPMResetTrap(pVCpu);
13045
13046 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
13047 if ( rcStrict2 == VINF_SUCCESS
13048 || rcStrict2 == VERR_PAGE_TABLE_NOT_PRESENT
13049 || rcStrict2 == VERR_PAGE_NOT_PRESENT)
13050 {
13051 /* Successfully synced our nested page tables. */
13052 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
13053 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
13054 return VINF_SUCCESS;
13055 }
13056
13057 Log4Func(("EPT return to ring-3 rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)));
13058 return rcStrict2;
13059}
13060
13061/** @} */
13062
13063/** @name VM-exit exception handlers.
13064 * @{
13065 */
13066/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
13067/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit exception handlers =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
13068/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
13069
13070/**
13071 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
13072 */
13073static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13074{
13075 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13076 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
13077
13078 int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0);
13079 AssertRCReturn(rc, rc);
13080
13081 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
13082 {
13083 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
13084 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
13085
13086 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
13087 * provides the VM-exit instruction length. If this causes problems later,
13088 * disassemble the instruction like it's done on AMD-V. */
13089 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
13090 AssertRCReturn(rc2, rc2);
13091 return rc;
13092 }
13093
13094 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
13095 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
13096 return rc;
13097}
13098
13099
13100/**
13101 * VM-exit exception handler for \#BP (Breakpoint exception).
13102 */
13103static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13104{
13105 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13106 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
13107
13108 int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
13109 AssertRCReturn(rc, rc);
13110
13111 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
13112 rc = DBGFRZTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
13113 if (rc == VINF_EM_RAW_GUEST_TRAP)
13114 {
13115 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
13116 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13117 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
13118 AssertRCReturn(rc, rc);
13119
13120 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
13121 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
13122 }
13123
13124 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
13125 return rc;
13126}
13127
13128
13129/**
13130 * VM-exit exception handler for \#AC (alignment check exception).
13131 */
13132static int hmR0VmxExitXcptAC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13133{
13134 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13135
13136 /*
13137 * Re-inject it. We'll detect any nesting before getting here.
13138 */
13139 int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
13140 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13141 AssertRCReturn(rc, rc);
13142 Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO);
13143
13144 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
13145 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
13146 return VINF_SUCCESS;
13147}
13148
13149
13150/**
13151 * VM-exit exception handler for \#DB (Debug exception).
13152 */
13153static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13154{
13155 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13156 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
13157
13158 /*
13159 * Get the DR6-like values from the VM-exit qualification and pass them to DBGF
13160 * for processing.
13161 */
13162 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
13163
13164 /* See Intel spec. Table 27-1 "Exit Qualifications for debug exceptions" for the format. */
13165 uint64_t uDR6 = X86_DR6_INIT_VAL;
13166 uDR6 |= (pVmxTransient->uExitQual & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
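 /* Illustrative example (assumption, not from the original source): if the guest tripped
    hardware breakpoint 0, the exit qualification has B0 set and uDR6 becomes
    X86_DR6_INIT_VAL | X86_DR6_B0, with the other breakpoint-condition bits left clear. */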
13167
13168 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
13169 rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, pVCpu->hm.s.fSingleInstruction);
13170 Log6Func(("rc=%Rrc\n", rc));
13171 if (rc == VINF_EM_RAW_GUEST_TRAP)
13172 {
13173 /*
13174 * The exception was for the guest. Update DR6, DR7.GD and
13175 * IA32_DEBUGCTL.LBR before forwarding it.
13176 * (See Intel spec. 27.1 "Architectural State before a VM-Exit".)
13177 */
13178 VMMRZCallRing3Disable(pVCpu);
13179 HM_DISABLE_PREEMPT(pVCpu);
13180
13181 pCtx->dr[6] &= ~X86_DR6_B_MASK;
13182 pCtx->dr[6] |= uDR6;
13183 if (CPUMIsGuestDebugStateActive(pVCpu))
13184 ASMSetDR6(pCtx->dr[6]);
13185
13186 HM_RESTORE_PREEMPT();
13187 VMMRZCallRing3Enable(pVCpu);
13188
13189 rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_DR7);
13190 AssertRCReturn(rc, rc);
13191
13192 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
13193 pCtx->dr[7] &= ~X86_DR7_GD;
13194
13195 /* Paranoia. */
13196 pCtx->dr[7] &= ~X86_DR7_RAZ_MASK;
13197 pCtx->dr[7] |= X86_DR7_RA1_MASK;
13198
13199 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pCtx->dr[7]);
13200 AssertRCReturn(rc, rc);
13201
13202 /*
13203 * Raise #DB in the guest.
13204 *
13205 * It is important to reflect exactly what the VM-exit gave us (preserving the
13206 * interruption-type) rather than use hmR0VmxSetPendingXcptDB() as the #DB could've
13207 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
13208 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
13209 *
13210 * Intel re-documented ICEBP/INT1 in May 2018; it was previously documented only as part of
13211 * the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
13212 */
13213 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
13214 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13215 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
13216 AssertRCReturn(rc, rc);
13217 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
13218 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
13219 return VINF_SUCCESS;
13220 }
13221
13222 /*
13223 * Not a guest trap, must be a hypervisor related debug event then.
13224 * Update DR6 in case someone is interested in it.
13225 */
13226 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
13227 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
13228 CPUMSetHyperDR6(pVCpu, uDR6);
13229
13230 return rc;
13231}
13232
13233
13234/**
13235 * Hacks its way around the lovely mesa driver's backdoor accesses.
13236 *
13237 * @sa hmR0SvmHandleMesaDrvGp
13238 */
13239static int hmR0VmxHandleMesaDrvGp(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
13240{
13241 Log(("hmR0VmxHandleMesaDrvGp: at %04x:%08RX64 rcx=%RX64 rbx=%RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
13242 RT_NOREF(pCtx);
13243
13244 /* For now we'll just skip the instruction. */
13245 return hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
13246}
13247
13248
13249/**
13250 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
13251 * backdoor logging w/o checking what it is running inside.
13252 *
13253 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
13254 * backdoor port and magic numbers loaded in registers.
13255 *
13256 * @returns true if it is, false if it isn't.
13257 * @sa hmR0SvmIsMesaDrvGp
13258 */
13259DECLINLINE(bool) hmR0VmxIsMesaDrvGp(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
13260{
13261 /* 0xed: IN eAX,dx */
13262 uint8_t abInstr[1];
13263 if (pVmxTransient->cbInstr != sizeof(abInstr))
13264 return false;
13265
13266 /* Check that it is #GP(0). */
13267 if (pVmxTransient->uExitIntErrorCode != 0)
13268 return false;
13269
13270 /* Check magic and port. */
13271 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
13272 /*Log(("hmR0VmxIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
13273 if (pCtx->rax != UINT32_C(0x564d5868))
13274 return false;
13275 if (pCtx->dx != UINT32_C(0x5658))
13276 return false;
13277
13278 /* Flat ring-3 CS. */
13279 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
13280 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
13281 /*Log(("hmR0VmxIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
13282 if (pCtx->cs.Attr.n.u2Dpl != 3)
13283 return false;
13284 if (pCtx->cs.u64Base != 0)
13285 return false;
13286
13287 /* Check opcode. */
13288 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
13289 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
13290 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
13291 /*Log(("hmR0VmxIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
13292 if (RT_FAILURE(rc))
13293 return false;
13294 if (abInstr[0] != 0xed)
13295 return false;
13296
13297 return true;
13298}
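/*
 * Illustrative guest-side sketch (assumption, not from the original source) of the access
 * pattern the check above recognizes, i.e. the VMware-style backdoor the mesa driver pokes
 * from flat ring-3:
 *
 *      mov     eax, 0x564d5868     ; backdoor magic number in EAX
 *      mov     edx, 0x5658         ; backdoor I/O port in DX
 *      in      eax, dx             ; single-byte opcode 0xed
 *
 * Lacking I/O permission, the IN raises #GP(0), which is what hmR0VmxExitXcptGP below
 * hands over to hmR0VmxHandleMesaDrvGp when fTrapXcptGpForLovelyMesaDrv is set.
 */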
13299
13300
13301/**
13302 * VM-exit exception handler for \#GP (General-protection exception).
13303 *
13304 * @remarks Requires pVmxTransient->uExitIntInfo to be up-to-date.
13305 */
13306static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13307{
13308 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13309 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
13310
13311 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
13312 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
13313 { /* likely */ }
13314 else
13315 {
13316#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
13317 Assert(pVCpu->hm.s.fUsingDebugLoop || pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv);
13318#endif
13319 /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
13320 int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
13321 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
13322 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13323 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
13324 AssertRCReturn(rc, rc);
13325 Log4Func(("Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pCtx->cs.Sel, pCtx->rip,
13326 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
13327
13328 if ( !pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv
13329 || !hmR0VmxIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
13330 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
13331 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
13332 else
13333 rc = hmR0VmxHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
13334 return rc;
13335 }
13336
13337 Assert(CPUMIsGuestInRealModeEx(pCtx));
13338 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
13339
13340 int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
13341 AssertRCReturn(rc, rc);
13342
13343 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
13344 if (rcStrict == VINF_SUCCESS)
13345 {
13346 if (!CPUMIsGuestInRealModeEx(pCtx))
13347 {
13348 /*
13349 * The guest is no longer in real-mode, check if we can continue executing the
13350 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
13351 */
13352 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
13353 if (HMCanExecuteVmxGuest(pVCpu, pCtx))
13354 {
13355 Log4Func(("Mode changed but guest still suitable for executing using VT-x\n"));
13356 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
13357 }
13358 else
13359 {
13360 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
13361 rcStrict = VINF_EM_RESCHEDULE;
13362 }
13363 }
13364 else
13365 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
13366 }
13367 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13368 {
13369 rcStrict = VINF_SUCCESS;
13370 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13371 }
13372 return VBOXSTRICTRC_VAL(rcStrict);
13373}
13374
13375
13376/**
13377 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
13378 * the exception reported in the VMX transient structure back into the VM.
13379 *
13380 * @remarks Requires uExitIntInfo in the VMX transient structure to be
13381 * up-to-date.
13382 */
13383static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13384{
13385 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13386#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
13387 AssertMsg(pVCpu->hm.s.fUsingDebugLoop || pVCpu->hm.s.vmx.RealMode.fRealOnV86Active,
13388 ("uVector=%#x u32XcptBitmap=%#X32\n",
13389 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVCpu->hm.s.vmx.Ctls.u32XcptBitmap));
13390#endif
13391
13392 /* Re-inject the exception into the guest. This cannot be a double-fault condition which would have been handled in
13393 hmR0VmxCheckExitDueToEventDelivery(). */
13394 int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
13395 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13396 AssertRCReturn(rc, rc);
13397 Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO);
13398
13399#ifdef DEBUG_ramshankar
13400 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
13401 uint8_t uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
13402 Log(("hmR0VmxExitXcptGeneric: Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pCtx->cs.Sel, pCtx->rip));
13403#endif
13404
13405 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
13406 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
13407 return VINF_SUCCESS;
13408}
13409
13410
13411/**
13412 * VM-exit exception handler for \#PF (Page-fault exception).
13413 */
13414static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13415{
13416 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13417 PVM pVM = pVCpu->CTX_SUFF(pVM);
13418 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
13419 rc |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
13420 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
13421 AssertRCReturn(rc, rc);
13422
13423 if (!pVM->hm.s.fNestedPaging)
13424 { /* likely */ }
13425 else
13426 {
13427#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF)
13428 Assert(pVCpu->hm.s.fUsingDebugLoop);
13429#endif
13430 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
13431 if (RT_LIKELY(!pVmxTransient->fVectoringDoublePF))
13432 {
13433 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
13434 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
13435 }
13436 else
13437 {
13438 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
13439 hmR0VmxSetPendingXcptDF(pVCpu);
13440 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
13441 }
13442 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
13443 return rc;
13444 }
13445
13446 /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
13447 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
13448 if (pVmxTransient->fVectoringPF)
13449 {
13450 Assert(pVCpu->hm.s.Event.fPending);
13451 return VINF_EM_RAW_INJECT_TRPM_EVENT;
13452 }
13453
13454 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
13455 rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
13456 AssertRCReturn(rc, rc);
13457
13458 Log4Func(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQual, pCtx->cs.Sel,
13459 pCtx->rip, pVmxTransient->uExitIntErrorCode, pCtx->cr3));
13460
13461 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
13462 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
13463
13464 Log4Func(("#PF: rc=%Rrc\n", rc));
13465 if (rc == VINF_SUCCESS)
13466 {
13467 /*
13468 * This is typically a shadow page table sync or an MMIO instruction. But we may have
13469 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
13470 */
13471 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
13472 TRPMResetTrap(pVCpu);
13473 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
13474 return rc;
13475 }
13476
13477 if (rc == VINF_EM_RAW_GUEST_TRAP)
13478 {
13479 if (!pVmxTransient->fVectoringDoublePF)
13480 {
13481 /* It's a guest page fault and needs to be reflected to the guest. */
13482 uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
13483 TRPMResetTrap(pVCpu);
13484 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
13485 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
13486 uGstErrorCode, pVmxTransient->uExitQual);
13487 }
13488 else
13489 {
13490 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
13491 TRPMResetTrap(pVCpu);
13492 pVCpu->hm.s.Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
13493 hmR0VmxSetPendingXcptDF(pVCpu);
13494 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
13495 }
13496
13497 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
13498 return VINF_SUCCESS;
13499 }
13500
13501 TRPMResetTrap(pVCpu);
13502 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
13503 return rc;
13504}
13505
13506/** @} */
13507
13508#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13509/** @name Nested-guest VM-exit handlers.
13510 * @{
13511 */
13512/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
13513/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= Nested-guest VM-exit handlers =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
13514/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
13515
13516/**
13517 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
13518 */
13519HMVMX_EXIT_DECL hmR0VmxExitVmclear(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13520{
13521 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13522
13523 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13524 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
13525 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
13526 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
13527 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
13528 AssertRCReturn(rc, rc);
13529
13530 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
13531
13532 VMXVEXITINFO ExitInfo;
13533 RT_ZERO(ExitInfo);
13534 ExitInfo.uReason = pVmxTransient->uExitReason;
13535 ExitInfo.u64Qual = pVmxTransient->uExitQual;
13536 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
13537 ExitInfo.cbInstr = pVmxTransient->cbInstr;
13538 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
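 /* Illustrative example (assumption, not from the original source): for a guest
    "vmclear [rbx+0x10]" the displacement is delivered in the exit qualification and the
    base register in the instruction-information field, so the macro above resolves the
    guest-linear address of the 64-bit VMCS-pointer operand that IEM then dereferences. */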
13539
13540 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
13541 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13542 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
13543 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13544 {
13545 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13546 rcStrict = VINF_SUCCESS;
13547 }
13548 return rcStrict;
13549}
13550
13551
13552/**
13553 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
13554 */
13555HMVMX_EXIT_DECL hmR0VmxExitVmlaunch(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13556{
13557 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13558
13559 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13560 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);
13561 AssertRCReturn(rc, rc);
13562
13563 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
13564
13565 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbInstr, VMXINSTRID_VMLAUNCH);
13566 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13567 {
13568 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
13569 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
13570 }
13571 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
13572 return rcStrict;
13573}
13574
13575
13576/**
13577 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
13578 */
13579HMVMX_EXIT_DECL hmR0VmxExitVmptrld(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13580{
13581 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13582
13583 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13584 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
13585 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
13586 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
13587 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
13588 AssertRCReturn(rc, rc);
13589
13590 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
13591
13592 VMXVEXITINFO ExitInfo;
13593 RT_ZERO(ExitInfo);
13594 ExitInfo.uReason = pVmxTransient->uExitReason;
13595 ExitInfo.u64Qual = pVmxTransient->uExitQual;
13596 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
13597 ExitInfo.cbInstr = pVmxTransient->cbInstr;
13598 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
13599
13600 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
13601 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13602 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
13603 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13604 {
13605 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13606 rcStrict = VINF_SUCCESS;
13607 }
13608 return rcStrict;
13609}
13610
13611
13612/**
13613 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
13614 */
13615HMVMX_EXIT_DECL hmR0VmxExitVmptrst(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13616{
13617 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13618
13619 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13620 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
13621 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
13622 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
13623 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
13624 AssertRCReturn(rc, rc);
13625
13626 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
13627
13628 VMXVEXITINFO ExitInfo;
13629 RT_ZERO(ExitInfo);
13630 ExitInfo.uReason = pVmxTransient->uExitReason;
13631 ExitInfo.u64Qual = pVmxTransient->uExitQual;
13632 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
13633 ExitInfo.cbInstr = pVmxTransient->cbInstr;
13634 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
13635
13636 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
13637 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13638 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
13639 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13640 {
13641 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13642 rcStrict = VINF_SUCCESS;
13643 }
13644 return rcStrict;
13645}
13646
13647
13648/**
13649 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Unconditional VM-exit.
13650 */
13651HMVMX_EXIT_DECL hmR0VmxExitVmread(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13652{
13653 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13654
13655 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13656 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
13657 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
13658 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
13659 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
13660 AssertRCReturn(rc, rc);
13661
13662 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
13663
13664 VMXVEXITINFO ExitInfo;
13665 RT_ZERO(ExitInfo);
13666 ExitInfo.uReason = pVmxTransient->uExitReason;
13667 ExitInfo.u64Qual = pVmxTransient->uExitQual;
13668 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
13669 ExitInfo.cbInstr = pVmxTransient->cbInstr;
13670 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
13671 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
13672
13673 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
13674 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13675 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
13676 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13677 {
13678 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13679 rcStrict = VINF_SUCCESS;
13680 }
13681 return rcStrict;
13682}
13683
13684
13685/**
13686 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
13687 */
13688HMVMX_EXIT_DECL hmR0VmxExitVmresume(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13689{
13690 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13691
13692 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13693 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);
13694 AssertRCReturn(rc, rc);
13695
13696 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
13697
13698 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbInstr, VMXINSTRID_VMRESUME);
13699 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13700 {
13701 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
13702 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
13703 }
13704 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
13705 return rcStrict;
13706}
13707
13708
13709/**
13710 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Unconditional VM-exit.
13711 */
13712HMVMX_EXIT_DECL hmR0VmxExitVmwrite(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13713{
13714 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13715
13716 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13717 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
13718 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
13719 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
13720 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
13721 AssertRCReturn(rc, rc);
13722
13723 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
13724
13725 VMXVEXITINFO ExitInfo;
13726 RT_ZERO(ExitInfo);
13727 ExitInfo.uReason = pVmxTransient->uExitReason;
13728 ExitInfo.u64Qual = pVmxTransient->uExitQual;
13729 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
13730 ExitInfo.cbInstr = pVmxTransient->cbInstr;
13731 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
13732 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
13733
13734 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
13735 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13736 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
13737 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13738 {
13739 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13740 rcStrict = VINF_SUCCESS;
13741 }
13742 return rcStrict;
13743}
13744
13745
13746/**
13747 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
13748 */
13749HMVMX_EXIT_DECL hmR0VmxExitVmxoff(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13750{
13751 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13752
13753 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13754 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR4 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
13755 AssertRCReturn(rc, rc);
13756
13757 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
13758
13759 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbInstr);
13760 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13761 {
13762 /* VMXOFF changes the internal hwvirt. state but not anything that's visible to the guest other than RIP. */
13763 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
13764 }
13765 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13766 {
13767 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13768 rcStrict = VINF_SUCCESS;
13769 }
13770 return rcStrict;
13771}
13772
13773
13774/**
13775 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
13776 */
13777HMVMX_EXIT_DECL hmR0VmxExitVmxon(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13778{
13779 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13780
13781 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13782 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
13783 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
13784 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
13785 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
13786 AssertRCReturn(rc, rc);
13787
13788 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
13789
13790 VMXVEXITINFO ExitInfo;
13791 RT_ZERO(ExitInfo);
13792 ExitInfo.uReason = pVmxTransient->uExitReason;
13793 ExitInfo.u64Qual = pVmxTransient->uExitQual;
13794 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
13795 ExitInfo.cbInstr = pVmxTransient->cbInstr;
13796 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
13797
13798 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
13799 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13800 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
13801 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13802 {
13803 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13804 rcStrict = VINF_SUCCESS;
13805 }
13806 return rcStrict;
13807}
13808
13809/** @} */
13810#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
13811