VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp@ 46814

Last change on this file since 46814 was 46789, checked in by vboxsync, 12 years ago

VMM/HM: Doxygen.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 384.9 KB
1/* $Id: HMVMXR0.cpp 46789 2013-06-25 17:50:27Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2012-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_HM
22#include <iprt/asm-amd64-x86.h>
23#include <iprt/thread.h>
24#include <iprt/string.h>
25
26#include "HMInternal.h"
27#include <VBox/vmm/vm.h>
28#include "HWVMXR0.h"
29#include <VBox/vmm/pdmapi.h>
30#include <VBox/vmm/dbgf.h>
31#include <VBox/vmm/iom.h>
32#include <VBox/vmm/selm.h>
33#include <VBox/vmm/tm.h>
34#ifdef VBOX_WITH_REM
35# include <VBox/vmm/rem.h>
36#endif
37#ifdef DEBUG_ramshankar
38#define HMVMX_SAVE_FULL_GUEST_STATE
39#define HMVMX_SYNC_FULL_GUEST_STATE
40#define HMVMX_ALWAYS_TRAP_ALL_XCPTS
41#define HMVMX_ALWAYS_TRAP_PF
42#endif
43
44
45/*******************************************************************************
46* Defined Constants And Macros *
47*******************************************************************************/
48#if defined(RT_ARCH_AMD64)
49# define HMVMX_IS_64BIT_HOST_MODE() (true)
50typedef RTHCUINTREG HMVMXHCUINTREG;
51#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
52extern "C" uint32_t g_fVMXIs64bitHost;
53# define HMVMX_IS_64BIT_HOST_MODE() (g_fVMXIs64bitHost != 0)
54typedef uint64_t HMVMXHCUINTREG;
55#else
56# define HMVMX_IS_64BIT_HOST_MODE() (false)
57typedef RTHCUINTREG HMVMXHCUINTREG;
58#endif
59
60/** Use the function table. */
61#define HMVMX_USE_FUNCTION_TABLE
62
63/** This bit indicates the segment selector is unusable in VT-x. */
64#define HMVMX_SEL_UNUSABLE RT_BIT(16)
65
66/** Determine which tagged-TLB flush handler to use. */
67#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
68#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
69#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
70#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
71
72/** @name Updated-guest-state flags.
73 * @{ */
74#define HMVMX_UPDATED_GUEST_RIP RT_BIT(0)
75#define HMVMX_UPDATED_GUEST_RSP RT_BIT(1)
76#define HMVMX_UPDATED_GUEST_RFLAGS RT_BIT(2)
77#define HMVMX_UPDATED_GUEST_CR0 RT_BIT(3)
78#define HMVMX_UPDATED_GUEST_CR3 RT_BIT(4)
79#define HMVMX_UPDATED_GUEST_CR4 RT_BIT(5)
80#define HMVMX_UPDATED_GUEST_GDTR RT_BIT(6)
81#define HMVMX_UPDATED_GUEST_IDTR RT_BIT(7)
82#define HMVMX_UPDATED_GUEST_LDTR RT_BIT(8)
83#define HMVMX_UPDATED_GUEST_TR RT_BIT(9)
84#define HMVMX_UPDATED_GUEST_SEGMENT_REGS RT_BIT(10)
85#define HMVMX_UPDATED_GUEST_DEBUG RT_BIT(11)
86#define HMVMX_UPDATED_GUEST_FS_BASE_MSR RT_BIT(12)
87#define HMVMX_UPDATED_GUEST_GS_BASE_MSR RT_BIT(13)
88#define HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR RT_BIT(14)
89#define HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR RT_BIT(15)
90#define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR RT_BIT(16)
91#define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS RT_BIT(17)
92#define HMVMX_UPDATED_GUEST_ACTIVITY_STATE RT_BIT(18)
93#define HMVMX_UPDATED_GUEST_APIC_STATE RT_BIT(19)
94#define HMVMX_UPDATED_GUEST_ALL ( HMVMX_UPDATED_GUEST_RIP \
95 | HMVMX_UPDATED_GUEST_RSP \
96 | HMVMX_UPDATED_GUEST_RFLAGS \
97 | HMVMX_UPDATED_GUEST_CR0 \
98 | HMVMX_UPDATED_GUEST_CR3 \
99 | HMVMX_UPDATED_GUEST_CR4 \
100 | HMVMX_UPDATED_GUEST_GDTR \
101 | HMVMX_UPDATED_GUEST_IDTR \
102 | HMVMX_UPDATED_GUEST_LDTR \
103 | HMVMX_UPDATED_GUEST_TR \
104 | HMVMX_UPDATED_GUEST_SEGMENT_REGS \
105 | HMVMX_UPDATED_GUEST_DEBUG \
106 | HMVMX_UPDATED_GUEST_FS_BASE_MSR \
107 | HMVMX_UPDATED_GUEST_GS_BASE_MSR \
108 | HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR \
109 | HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR \
110 | HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR \
111 | HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS \
112 | HMVMX_UPDATED_GUEST_ACTIVITY_STATE \
113 | HMVMX_UPDATED_GUEST_APIC_STATE)
114/** @} */
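/*
 * Illustrative sketch: the guest-state save helpers later in this file OR one of the
 * flags above into a per-VCPU tracking field once the corresponding register has been
 * read from the VMCS, and callers test that flag to skip redundant VMREADs. The field
 * and function names here are assumed from usage elsewhere in this file:
 *
 *     if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP))
 *         rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
 */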
115
116/**
117 * Flags to skip redundant reads of some common VMCS fields that are not part of
118 * the guest-CPU state but are in the transient structure.
119 */
120#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO RT_BIT(0)
121#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE RT_BIT(1)
122#define HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION RT_BIT(2)
123#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN RT_BIT(3)
124#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO RT_BIT(4)
125#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE RT_BIT(5)
126
127/**
128 * Exception bitmap mask for real-mode guests (real-on-v86). We need to intercept all exceptions manually (except #PF).
129 * #NM is also handled separately, see hmR0VmxLoadGuestControlRegs(). #PF need not be intercepted even in real-mode if
130 * we have Nested Paging support.
131 */
132#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) | RT_BIT(X86_XCPT_DB) | RT_BIT(X86_XCPT_NMI) \
133 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
134 | RT_BIT(X86_XCPT_UD) /* RT_BIT(X86_XCPT_NM) */ | RT_BIT(X86_XCPT_DF) \
135 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
136 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
137 | RT_BIT(X86_XCPT_MF) | RT_BIT(X86_XCPT_AC) | RT_BIT(X86_XCPT_MC) \
138 | RT_BIT(X86_XCPT_XF))
139
140/**
141 * Exception bitmap mask for all contributory exceptions.
142 */
143#define HMVMX_CONTRIBUTORY_XCPT_MASK ( RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
144 | RT_BIT(X86_XCPT_DE))
145
146/** Maximum VM-instruction error number. */
147#define HMVMX_INSTR_ERROR_MAX 28
148
149/** Profiling macro. */
150#ifdef HM_PROFILE_EXIT_DISPATCH
151# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
152# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
153#else
154# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
155# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
156#endif
157
158
159/*******************************************************************************
160* Structures and Typedefs *
161*******************************************************************************/
162/** @name VMX transient.
163 *
164 * A state structure for holding miscellaneous information across
165 * VMX non-root operation, restored after the transition.
166 *
167 * @{ */
168typedef struct VMXTRANSIENT
169{
170 /** The host's rflags/eflags. */
171 RTCCUINTREG uEFlags;
172#if HC_ARCH_BITS == 32
173 uint32_t u32Alignment0;
174#endif
175 /** The guest's LSTAR MSR value used for TPR patching for 32-bit guests. */
176 uint64_t u64LStarMsr;
177 /** The guest's TPR value used for TPR shadowing. */
178 uint8_t u8GuestTpr;
179 /** Alignment. */
180 uint8_t abAlignment0[6];
181
182 /** The basic VM-exit reason. */
183 uint16_t uExitReason;
184 /** Alignment. */
185 uint16_t u16Alignment0;
186 /** The VM-exit interruption error code. */
187 uint32_t uExitIntrErrorCode;
188 /** The VM-exit exit qualification. */
189 uint64_t uExitQualification;
190
191 /** The VM-exit interruption-information field. */
192 uint32_t uExitIntrInfo;
193 /** The VM-exit instruction-length field. */
194 uint32_t cbInstr;
195 /** Whether the VM-entry failed or not. */
196 bool fVMEntryFailed;
197 /** Alignment. */
198 uint8_t abAlignment1[5];
199
200 /** The VM-entry interruption-information field. */
201 uint32_t uEntryIntrInfo;
202 /** The VM-entry exception error code field. */
203 uint32_t uEntryXcptErrorCode;
204 /** The VM-entry instruction length field. */
205 uint32_t cbEntryInstr;
206
207 /** IDT-vectoring information field. */
208 uint32_t uIdtVectoringInfo;
209 /** IDT-vectoring error code. */
210 uint32_t uIdtVectoringErrorCode;
211
212 /** Mask of currently read VMCS fields; HMVMX_UPDATED_TRANSIENT_*. */
213 uint32_t fVmcsFieldsRead;
214 /** Whether TSC-offsetting should be set up before VM-entry. */
215 bool fUpdateTscOffsettingAndPreemptTimer;
216 /** Whether the VM-exit was caused by a page-fault during delivery of a
217 * contributory exception or a page-fault. */
218 bool fVectoringPF;
219} VMXTRANSIENT, *PVMXTRANSIENT;
220AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, sizeof(uint64_t));
221AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntrInfo, sizeof(uint64_t));
222AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntrInfo, sizeof(uint64_t));
223/** @} */
224
225
226/**
227 * MSR-bitmap read permissions.
228 */
229typedef enum VMXMSREXITREAD
230{
231 /** Reading this MSR causes a VM-exit. */
232 VMXMSREXIT_INTERCEPT_READ = 0xb,
233 /** Reading this MSR does not cause a VM-exit. */
234 VMXMSREXIT_PASSTHRU_READ
235} VMXMSREXITREAD;
236
237/**
238 * MSR-bitmap write permissions.
239 */
240typedef enum VMXMSREXITWRITE
241{
242 /** Writing to this MSR causes a VM-exit. */
243 VMXMSREXIT_INTERCEPT_WRITE = 0xd,
244 /** Writing to this MSR does not cause a VM-exit. */
245 VMXMSREXIT_PASSTHRU_WRITE
246} VMXMSREXITWRITE;
247
248
249/*******************************************************************************
250* Internal Functions *
251*******************************************************************************/
252static void hmR0VmxFlushEpt(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush);
253static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr);
254static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
255 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntrState);
256#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
257static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
258#endif
259#ifndef HMVMX_USE_FUNCTION_TABLE
260DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
261#define HMVMX_EXIT_DECL static int
262#else
263#define HMVMX_EXIT_DECL static DECLCALLBACK(int)
264#endif
265
266HMVMX_EXIT_DECL hmR0VmxExitXcptNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
267HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
268HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
269HMVMX_EXIT_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
270HMVMX_EXIT_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
271HMVMX_EXIT_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
272HMVMX_EXIT_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
273HMVMX_EXIT_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
274HMVMX_EXIT_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
275HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
276HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
277HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
278HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
279HMVMX_EXIT_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
280HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
281HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
282HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
283HMVMX_EXIT_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
284HMVMX_EXIT_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
285HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
286HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
287HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
288HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
289HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
290HMVMX_EXIT_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
291HMVMX_EXIT_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
292HMVMX_EXIT_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
293HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
294HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
295HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
296HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
297HMVMX_EXIT_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
298HMVMX_EXIT_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
299HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
300HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
302HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
303HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
304HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
305HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
306HMVMX_EXIT_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
307HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
308HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
309HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
310
311static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
312static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
313static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
314static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
315static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
316static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
317static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
318
319
320/*******************************************************************************
321* Global Variables *
322*******************************************************************************/
323#ifdef HMVMX_USE_FUNCTION_TABLE
324/**
325 * VM-exit handler.
326 *
327 * @returns VBox status code.
328 * @param pVCpu Pointer to the VMCPU.
329 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
330 * out-of-sync. Make sure to update the required
331 * fields before using them.
332 * @param pVmxTransient Pointer to the VMX-transient structure.
333 */
334typedef DECLCALLBACK(int) FNVMEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
335/** Pointer to VM-exit handler. */
336typedef FNVMEXITHANDLER *const PFNVMEXITHANDLER;
337
338/**
339 * VMX_EXIT dispatch table.
340 */
341static const PFNVMEXITHANDLER g_apfnVMExitHandlers[VMX_EXIT_MAX + 1] =
342{
343 /* 00 VMX_EXIT_XCPT_NMI */ hmR0VmxExitXcptNmi,
344 /* 01 VMX_EXIT_EXT_INT */ hmR0VmxExitExtInt,
345 /* 02 VMX_EXIT_TRIPLE_FAULT */ hmR0VmxExitTripleFault,
346 /* 03 VMX_EXIT_INIT_SIGNAL */ hmR0VmxExitInitSignal,
347 /* 04 VMX_EXIT_SIPI */ hmR0VmxExitSipi,
348 /* 05 VMX_EXIT_IO_SMI */ hmR0VmxExitIoSmi,
349 /* 06 VMX_EXIT_SMI */ hmR0VmxExitSmi,
350 /* 07 VMX_EXIT_INT_WINDOW */ hmR0VmxExitIntWindow,
351 /* 08 VMX_EXIT_NMI_WINDOW */ hmR0VmxExitNmiWindow,
352 /* 09 VMX_EXIT_TASK_SWITCH */ hmR0VmxExitTaskSwitch,
353 /* 10 VMX_EXIT_CPUID */ hmR0VmxExitCpuid,
354 /* 11 VMX_EXIT_GETSEC */ hmR0VmxExitGetsec,
355 /* 12 VMX_EXIT_HLT */ hmR0VmxExitHlt,
356 /* 13 VMX_EXIT_INVD */ hmR0VmxExitInvd,
357 /* 14 VMX_EXIT_INVLPG */ hmR0VmxExitInvlpg,
358 /* 15 VMX_EXIT_RDPMC */ hmR0VmxExitRdpmc,
359 /* 16 VMX_EXIT_RDTSC */ hmR0VmxExitRdtsc,
360 /* 17 VMX_EXIT_RSM */ hmR0VmxExitRsm,
361 /* 18 VMX_EXIT_VMCALL */ hmR0VmxExitSetPendingXcptUD,
362 /* 19 VMX_EXIT_VMCLEAR */ hmR0VmxExitSetPendingXcptUD,
363 /* 20 VMX_EXIT_VMLAUNCH */ hmR0VmxExitSetPendingXcptUD,
364 /* 21 VMX_EXIT_VMPTRLD */ hmR0VmxExitSetPendingXcptUD,
365 /* 22 VMX_EXIT_VMPTRST */ hmR0VmxExitSetPendingXcptUD,
366 /* 23 VMX_EXIT_VMREAD */ hmR0VmxExitSetPendingXcptUD,
367 /* 24 VMX_EXIT_VMRESUME */ hmR0VmxExitSetPendingXcptUD,
368 /* 25 VMX_EXIT_VMWRITE */ hmR0VmxExitSetPendingXcptUD,
369 /* 26 VMX_EXIT_VMXOFF */ hmR0VmxExitSetPendingXcptUD,
370 /* 27 VMX_EXIT_VMXON */ hmR0VmxExitSetPendingXcptUD,
371 /* 28 VMX_EXIT_MOV_CRX */ hmR0VmxExitMovCRx,
372 /* 29 VMX_EXIT_MOV_DRX */ hmR0VmxExitMovDRx,
373 /* 30 VMX_EXIT_IO_INSTR */ hmR0VmxExitIoInstr,
374 /* 31 VMX_EXIT_RDMSR */ hmR0VmxExitRdmsr,
375 /* 32 VMX_EXIT_WRMSR */ hmR0VmxExitWrmsr,
376 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ hmR0VmxExitErrInvalidGuestState,
377 /* 34 VMX_EXIT_ERR_MSR_LOAD */ hmR0VmxExitErrMsrLoad,
378 /* 35 UNDEFINED */ hmR0VmxExitErrUndefined,
379 /* 36 VMX_EXIT_MWAIT */ hmR0VmxExitMwait,
380 /* 37 VMX_EXIT_MTF */ hmR0VmxExitMtf,
381 /* 38 UNDEFINED */ hmR0VmxExitErrUndefined,
382 /* 39 VMX_EXIT_MONITOR */ hmR0VmxExitMonitor,
383 /* 40 VMX_EXIT_PAUSE */ hmR0VmxExitPause,
384 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ hmR0VmxExitErrMachineCheck,
385 /* 42 UNDEFINED */ hmR0VmxExitErrUndefined,
386 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ hmR0VmxExitTprBelowThreshold,
387 /* 44 VMX_EXIT_APIC_ACCESS */ hmR0VmxExitApicAccess,
388 /* 45 UNDEFINED */ hmR0VmxExitErrUndefined,
389 /* 46 VMX_EXIT_XDTR_ACCESS */ hmR0VmxExitXdtrAccess,
390 /* 47 VMX_EXIT_TR_ACCESS */ hmR0VmxExitXdtrAccess,
391 /* 48 VMX_EXIT_EPT_VIOLATION */ hmR0VmxExitEptViolation,
392 /* 49 VMX_EXIT_EPT_MISCONFIG */ hmR0VmxExitEptMisconfig,
393 /* 50 VMX_EXIT_INVEPT */ hmR0VmxExitSetPendingXcptUD,
394 /* 51 VMX_EXIT_RDTSCP */ hmR0VmxExitRdtscp,
395 /* 52 VMX_EXIT_PREEMPT_TIMER */ hmR0VmxExitPreemptTimer,
396 /* 53 VMX_EXIT_INVVPID */ hmR0VmxExitSetPendingXcptUD,
397 /* 54 VMX_EXIT_WBINVD */ hmR0VmxExitWbinvd,
398 /* 55 VMX_EXIT_XSETBV */ hmR0VmxExitXsetbv,
399 /* 56 UNDEFINED */ hmR0VmxExitErrUndefined,
400 /* 57 VMX_EXIT_RDRAND */ hmR0VmxExitRdrand,
401 /* 58 VMX_EXIT_INVPCID */ hmR0VmxExitInvpcid,
402 /* 59 VMX_EXIT_VMFUNC */ hmR0VmxExitSetPendingXcptUD
403};
404#endif /* HMVMX_USE_FUNCTION_TABLE */
405
406#ifdef VBOX_STRICT
407static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
408{
409 /* 0 */ "(Not Used)",
410 /* 1 */ "VMCALL executed in VMX root operation.",
411 /* 2 */ "VMCLEAR with invalid physical address.",
412 /* 3 */ "VMCLEAR with VMXON pointer.",
413 /* 4 */ "VMLAUNCH with non-clear VMCS.",
414 /* 5 */ "VMRESUME with non-launched VMCS.",
415 /* 6 */ "VMRESUME after VMXOFF",
416 /* 7 */ "VM entry with invalid control fields.",
417 /* 8 */ "VM entry with invalid host state fields.",
418 /* 9 */ "VMPTRLD with invalid physical address.",
419 /* 10 */ "VMPTRLD with VMXON pointer.",
420 /* 11 */ "VMPTRLD with incorrect revision identifier.",
421 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
422 /* 13 */ "VMWRITE to read-only VMCS component.",
423 /* 14 */ "(Not Used)",
424 /* 15 */ "VMXON executed in VMX root operation.",
425 /* 16 */ "VM entry with invalid executive-VMCS pointer.",
426 /* 17 */ "VM entry with non-launched executing VMCS.",
427 /* 18 */ "VM entry with executive-VMCS pointer not VMXON pointer.",
428 /* 19 */ "VMCALL with non-clear VMCS.",
429 /* 20 */ "VMCALL with invalid VM-exit control fields.",
430 /* 21 */ "(Not Used)",
431 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
432 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
433 /* 24 */ "VMCALL with invalid SMM-monitor features.",
434 /* 25 */ "VM entry with invalid VM-execution control fields in executive VMCS.",
435 /* 26 */ "VM entry with events blocked by MOV SS.",
436 /* 27 */ "(Not Used)",
437 /* 28 */ "Invalid operand to INVEPT/INVVPID."
438};
439#endif /* VBOX_STRICT */
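/*
 * Illustrative sketch: in strict builds, a VM-instruction error number read from the
 * VMCS (see hmR0VmxUpdateErrorRecord() below) can be mapped to one of the strings
 * above, clamping values beyond HMVMX_INSTR_ERROR_MAX:
 *
 *     uint32_t const uInstrError = pVCpu->hm.s.vmx.lasterror.u32InstrError;
 *     const char    *pszError    = uInstrError <= HMVMX_INSTR_ERROR_MAX
 *                                ? g_apszVmxInstrErrors[uInstrError]
 *                                : "Unknown";
 */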
440
441
442
443/**
444 * Updates the VM's last error record. If there was a VMX instruction error,
445 * reads the error data from the VMCS and updates the VCPU's last error record as
446 * well.
447 *
448 * @param pVM Pointer to the VM.
449 * @param pVCpu Pointer to the VMCPU (can be NULL if @a rc is not
450 * VERR_VMX_UNABLE_TO_START_VM or
451 * VERR_VMX_INVALID_VMCS_FIELD).
452 * @param rc The error code.
453 */
454static void hmR0VmxUpdateErrorRecord(PVM pVM, PVMCPU pVCpu, int rc)
455{
456 AssertPtr(pVM);
457 if ( rc == VERR_VMX_INVALID_VMCS_FIELD
458 || rc == VERR_VMX_UNABLE_TO_START_VM)
459 {
460 AssertPtrReturnVoid(pVCpu);
461 VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.lasterror.u32InstrError);
462 }
463 pVM->hm.s.lLastError = rc;
464}
465
466
467/**
468 * Reads the VM-entry interruption-information field from the VMCS into the VMX
469 * transient structure.
470 *
471 * @returns VBox status code.
472 * @param pVmxTransient Pointer to the VMX transient structure.
473 *
474 * @remarks No-long-jump zone!!!
475 */
476DECLINLINE(int) hmR0VmxReadEntryIntrInfoVmcs(PVMXTRANSIENT pVmxTransient)
477{
478 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntrInfo);
479 AssertRCReturn(rc, rc);
480 return VINF_SUCCESS;
481}
482
483
484/**
485 * Reads the VM-entry exception error code field from the VMCS into
486 * the VMX transient structure.
487 *
488 * @returns VBox status code.
489 * @param pVmxTransient Pointer to the VMX transient structure.
490 *
491 * @remarks No-long-jump zone!!!
492 */
493DECLINLINE(int) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
494{
495 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
496 AssertRCReturn(rc, rc);
497 return VINF_SUCCESS;
498}
499
500
501/**
502 * Reads the VM-entry instruction length field from the VMCS into
503 * the VMX transient structure.
504 *
505 * @returns VBox status code.
506 * @param pVCpu Pointer to the VMCPU.
507 * @param pVmxTransient Pointer to the VMX transient structure.
508 *
509 * @remarks No-long-jump zone!!!
510 */
511DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
512{
513 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
514 AssertRCReturn(rc, rc);
515 return VINF_SUCCESS;
516}
517
518
519/**
520 * Reads the VM-exit interruption-information field from the VMCS into the VMX
521 * transient structure.
522 *
523 * @returns VBox status code.
524 * @param pVCpu Pointer to the VMCPU.
525 * @param pVmxTransient Pointer to the VMX transient structure.
526 */
527DECLINLINE(int) hmR0VmxReadExitIntrInfoVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
528{
529 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO))
530 {
531 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntrInfo);
532 AssertRCReturn(rc, rc);
533 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO;
534 }
535 return VINF_SUCCESS;
536}
537
538
539/**
540 * Reads the VM-exit interruption error code from the VMCS into the VMX
541 * transient structure.
542 *
543 * @returns VBox status code.
544 * @param pVCpu Pointer to the VMCPU.
545 * @param pVmxTransient Pointer to the VMX transient structure.
546 */
547DECLINLINE(int) hmR0VmxReadExitIntrErrorCodeVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
548{
549 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE))
550 {
551 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntrErrorCode);
552 AssertRCReturn(rc, rc);
553 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE;
554 }
555 return VINF_SUCCESS;
556}
557
558
559/**
560 * Reads the VM-exit instruction length field from the VMCS into the VMX
561 * transient structure.
562 *
563 * @returns VBox status code.
564 * @param pVCpu Pointer to the VMCPU.
565 * @param pVmxTransient Pointer to the VMX transient structure.
566 */
567DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
568{
569 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN))
570 {
571 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
572 AssertRCReturn(rc, rc);
573 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN;
574 }
575 return VINF_SUCCESS;
576}
577
578
579/**
580 * Reads the exit qualification from the VMCS into the VMX transient structure.
581 *
582 * @returns VBox status code.
583 * @param pVCpu Pointer to the VMCPU.
584 * @param pVmxTransient Pointer to the VMX transient structure.
585 */
586DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
587{
588 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION))
589 {
590 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification);
591 AssertRCReturn(rc, rc);
592 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION;
593 }
594 return VINF_SUCCESS;
595}
596
597
598/**
599 * Reads the IDT-vectoring information field from the VMCS into the VMX
600 * transient structure.
601 *
602 * @returns VBox status code.
603 * @param pVmxTransient Pointer to the VMX transient structure.
604 *
605 * @remarks No-long-jump zone!!!
606 */
607DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
608{
609 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO))
610 {
611 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_INFO, &pVmxTransient->uIdtVectoringInfo);
612 AssertRCReturn(rc, rc);
613 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO;
614 }
615 return VINF_SUCCESS;
616}
617
618
619/**
620 * Reads the IDT-vectoring error code from the VMCS into the VMX
621 * transient structure.
622 *
623 * @returns VBox status code.
624 * @param pVmxTransient Pointer to the VMX transient structure.
625 */
626DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
627{
628 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE))
629 {
630 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
631 AssertRCReturn(rc, rc);
632 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE;
633 }
634 return VINF_SUCCESS;
635}
636
637
638/**
639 * Enters VMX root mode operation on the current CPU.
640 *
641 * @returns VBox status code.
642 * @param pVM Pointer to the VM (optional, can be NULL after
643 * a resume).
644 * @param HCPhysCpuPage Physical address of the VMXON region.
645 * @param pvCpuPage Pointer to the VMXON region.
646 */
647static int hmR0VmxEnterRootMode(PVM pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
648{
649 AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
650 AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
651 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
652
653 if (pVM)
654 {
655 /* Write the VMCS revision dword to the VMXON region. */
656 *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
657 }
658
659 /* Enable the VMX bit in CR4 if necessary. */
660 RTCCUINTREG uCr4 = ASMGetCR4();
661 if (!(uCr4 & X86_CR4_VMXE))
662 ASMSetCR4(uCr4 | X86_CR4_VMXE);
663
664 /* Enter VMX root mode. */
665 int rc = VMXEnable(HCPhysCpuPage);
666 if (RT_FAILURE(rc))
667 ASMSetCR4(uCr4);
668
669 return rc;
670}
671
672
673/**
674 * Exits VMX root mode operation on the current CPU.
675 *
676 * @returns VBox status code.
677 */
678static int hmR0VmxLeaveRootMode(void)
679{
680 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
681
682 /* If we're for some reason not in VMX root mode, then don't leave it. */
683 RTCCUINTREG uHostCR4 = ASMGetCR4();
684 if (uHostCR4 & X86_CR4_VMXE)
685 {
686 /* Exit VMX root mode and clear the VMX bit in CR4. */
687 VMXDisable();
688 ASMSetCR4(uHostCR4 & ~X86_CR4_VMXE);
689 return VINF_SUCCESS;
690 }
691
692 return VERR_VMX_NOT_IN_VMX_ROOT_MODE;
693}
694
695
696/**
697 * Allocates and maps one physically contiguous page. The allocated page is
698 * zeroed out (used by various VT-x structures).
699 *
700 * @returns IPRT status code.
701 * @param pMemObj Pointer to the ring-0 memory object.
702 * @param ppVirt Where to store the virtual address of the
703 * allocation.
704 * @param pHCPhys Where to store the physical address of the
705 * allocation.
706 */
707DECLINLINE(int) hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
708{
709 AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER);
710 AssertPtrReturn(ppVirt, VERR_INVALID_PARAMETER);
711 AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
712
713 int rc = RTR0MemObjAllocCont(pMemObj, PAGE_SIZE, false /* fExecutable */);
714 if (RT_FAILURE(rc))
715 return rc;
716 *ppVirt = RTR0MemObjAddress(*pMemObj);
717 *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
718 ASMMemZero32(*ppVirt, PAGE_SIZE);
719 return VINF_SUCCESS;
720}
721
722
723/**
724 * Frees and unmaps an allocated physical page.
725 *
726 * @param pMemObj Pointer to the ring-0 memory object.
727 * @param ppVirt Where to re-initialize the virtual address of
728 * the allocation as 0.
729 * @param pHCPhys Where to re-initialize the physical address of the
730 * allocation as 0.
731 */
732DECLINLINE(void) hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
733{
734 AssertPtr(pMemObj);
735 AssertPtr(ppVirt);
736 AssertPtr(pHCPhys);
737 if (*pMemObj != NIL_RTR0MEMOBJ)
738 {
739 int rc = RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
740 AssertRC(rc);
741 *pMemObj = NIL_RTR0MEMOBJ;
742 *ppVirt = 0;
743 *pHCPhys = 0;
744 }
745}
746
747
748/**
749 * Worker function to free VT-x related structures.
750 *
752 * @param pVM Pointer to the VM.
753 */
754static void hmR0VmxStructsFree(PVM pVM)
755{
756 for (VMCPUID i = 0; i < pVM->cCpus; i++)
757 {
758 PVMCPU pVCpu = &pVM->aCpus[i];
759 AssertPtr(pVCpu);
760
761#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
762 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
763 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
764#endif
765
766 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
767 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
768
769 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic, &pVCpu->hm.s.vmx.HCPhysVirtApic);
770 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
771 }
772
773 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
774#ifdef VBOX_WITH_CRASHDUMP_MAGIC
775 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
776#endif
777}
778
779
780/**
781 * Worker function to allocate VT-x related VM structures.
782 *
783 * @returns IPRT status code.
784 * @param pVM Pointer to the VM.
785 */
786static int hmR0VmxStructsAlloc(PVM pVM)
787{
788 /*
789 * Initialize members up-front so we can cleanup properly on allocation failure.
790 */
791#define VMXLOCAL_INIT_VM_MEMOBJ(a_Name, a_VirtPrefix) \
792 pVM->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
793 pVM->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
794 pVM->hm.s.vmx.HCPhys##a_Name = 0;
795
796#define VMXLOCAL_INIT_VMCPU_MEMOBJ(a_Name, a_VirtPrefix) \
797 pVCpu->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
798 pVCpu->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
799 pVCpu->hm.s.vmx.HCPhys##a_Name = 0;
800
801#ifdef VBOX_WITH_CRASHDUMP_MAGIC
802 VMXLOCAL_INIT_VM_MEMOBJ(Scratch, pv);
803#endif
804 VMXLOCAL_INIT_VM_MEMOBJ(ApicAccess, pb);
805
806 AssertCompile(sizeof(VMCPUID) == sizeof(pVM->cCpus));
807 for (VMCPUID i = 0; i < pVM->cCpus; i++)
808 {
809 PVMCPU pVCpu = &pVM->aCpus[i];
810 VMXLOCAL_INIT_VMCPU_MEMOBJ(Vmcs, pv);
811 VMXLOCAL_INIT_VMCPU_MEMOBJ(VirtApic, pb);
812 VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
813#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
814 VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
815 VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
816#endif
817 }
818#undef VMXLOCAL_INIT_VMCPU_MEMOBJ
819#undef VMXLOCAL_INIT_VM_MEMOBJ
820
821 /*
822 * Allocate all the VT-x structures.
823 */
824 int rc = VINF_SUCCESS;
825#ifdef VBOX_WITH_CRASHDUMP_MAGIC
826 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
827 if (RT_FAILURE(rc))
828 goto cleanup;
829 strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
830 *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xdeadbeefdeadbeef);
831#endif
832
833 /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
834 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
835 {
836 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
837 &pVM->hm.s.vmx.HCPhysApicAccess);
838 if (RT_FAILURE(rc))
839 goto cleanup;
840 }
841
842 /*
843 * Allocate and initialize the per-VCPU VT-x structures.
844 */
845 for (VMCPUID i = 0; i < pVM->cCpus; i++)
846 {
847 PVMCPU pVCpu = &pVM->aCpus[i];
848 AssertPtr(pVCpu);
849
850 /* Allocate the VM control structure (VMCS). */
851 AssertReturn(MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.msr.vmx_basic_info) <= PAGE_SIZE, VERR_INTERNAL_ERROR);
852 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
853 if (RT_FAILURE(rc))
854 goto cleanup;
855
856 /* Allocate the Virtual-APIC page for transparent TPR accesses. */
857 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
858 {
859 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
860 &pVCpu->hm.s.vmx.HCPhysVirtApic);
861 if (RT_FAILURE(rc))
862 goto cleanup;
863 }
864
865 /* Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for transparent accesses of specific MSRs. */
866 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
867 {
868 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
869 &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
870 if (RT_FAILURE(rc))
871 goto cleanup;
872 memset(pVCpu->hm.s.vmx.pvMsrBitmap, 0xff, PAGE_SIZE);
873 }
874
875#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
876 /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
877 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
878 if (RT_FAILURE(rc))
879 goto cleanup;
880
881 /* Allocate the VM-exit MSR-load page for the host MSRs. */
882 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
883 if (RT_FAILURE(rc))
884 goto cleanup;
885#endif
886 }
887
888 return VINF_SUCCESS;
889
890cleanup:
891 hmR0VmxStructsFree(pVM);
892 return rc;
893}
894
895
896/**
897 * Does global VT-x initialization (called during module initialization).
898 *
899 * @returns VBox status code.
900 */
901VMMR0DECL(int) VMXR0GlobalInit(void)
902{
903#ifdef HMVMX_USE_FUNCTION_TABLE
904 AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers));
905# ifdef VBOX_STRICT
906 for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++)
907 Assert(g_apfnVMExitHandlers[i]);
908# endif
909#endif
910 return VINF_SUCCESS;
911}
912
913
914/**
915 * Does global VT-x termination (called during module termination).
916 */
917VMMR0DECL(void) VMXR0GlobalTerm()
918{
919 /* Nothing to do currently. */
920}
921
922
923/**
924 * Sets up and activates VT-x on the current CPU.
925 *
926 * @returns VBox status code.
927 * @param pCpu Pointer to the global CPU info struct.
928 * @param pVM Pointer to the VM (can be NULL after a host resume
929 * operation).
930 * @param pvCpuPage Pointer to the VMXON region (can be NULL if @a
931 * fEnabledByHost is true).
932 * @param HCPhysCpuPage Physical address of the VMXON region (can be 0 if
933 * @a fEnabledByHost is true).
934 * @param fEnabledByHost Set if SUPR0EnableVTx() or similar was used to
935 * enable VT-x on the host.
936 */
937VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost)
938{
939 AssertReturn(pCpu, VERR_INVALID_PARAMETER);
940 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
941
942 if (!fEnabledByHost)
943 {
944 int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
945 if (RT_FAILURE(rc))
946 return rc;
947 }
948
949 /*
950 * Flush all EPTP-tagged TLB entries (in case any other hypervisor has been using EPTPs) so that
951 * we can avoid an explicit flush while using new VPIDs. We would still need to flush
952 * each time a VPID is reused after hitting the MaxASID limit once.
953 */
954 if ( pVM
955 && pVM->hm.s.fNestedPaging)
956 {
957 /* We require ALL_CONTEXT flush-type to be available on the CPU. See hmR0VmxSetupTaggedTlb(). */
958 Assert(pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS);
959 hmR0VmxFlushEpt(pVM, NULL /* pVCpu */, VMX_FLUSH_EPT_ALL_CONTEXTS);
960 pCpu->fFlushAsidBeforeUse = false;
961 }
962 else
963 {
964 /** @todo This is still not perfect. If this function was triggered on host
965 * resume (pVM is NULL) or by a VM without Nested Paging, we still risk
966 * running with stale TLB entries from other hypervisors when we later
967 * run a VM with Nested Paging. To fix this properly we would have to
968 * pass '&g_HvmR0' (see HMR0.cpp) to this function and read
969 * 'vmx_ept_vpid_caps' from it. Sigh. */
970 pCpu->fFlushAsidBeforeUse = true;
971 }
972
973 /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
974 ++pCpu->cTlbFlushes;
975
976 return VINF_SUCCESS;
977}
978
979
980/**
981 * Deactivates VT-x on the current CPU.
982 *
983 * @returns VBox status code.
984 * @param pCpu Pointer to the global CPU info struct.
985 * @param pvCpuPage Pointer to the VMXON region.
986 * @param HCPhysCpuPage Physical address of the VMXON region.
987 *
988 * @remarks This function should never be called when SUPR0EnableVTx() or
989 * similar was used to enable VT-x on the host.
990 */
991VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
992{
993 NOREF(pCpu);
994 NOREF(pvCpuPage);
995 NOREF(HCPhysCpuPage);
996
997 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
998 return hmR0VmxLeaveRootMode();
999}
1000
1001
1002/**
1003 * Sets the permission bits for the specified MSR in the MSR bitmap.
1004 *
1005 * @param pVCpu Pointer to the VMCPU.
1006 * @param uMsr The MSR value.
1007 * @param enmRead Whether reading this MSR causes a VM-exit.
1008 * @param enmWrite Whether writing this MSR causes a VM-exit.
1009 */
1010static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
1011{
1012 int32_t iBit;
1013 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
1014
1015 /*
1016 * Layout:
1017 * 0x000 - 0x3ff - Low MSR read bits
1018 * 0x400 - 0x7ff - High MSR read bits
1019 * 0x800 - 0xbff - Low MSR write bits
1020 * 0xc00 - 0xfff - High MSR write bits
1021 */
1022 if (uMsr <= 0x00001FFF)
1023 iBit = uMsr;
1024 else if ( uMsr >= 0xC0000000
1025 && uMsr <= 0xC0001FFF)
1026 {
1027 iBit = (uMsr - 0xC0000000);
1028 pbMsrBitmap += 0x400;
1029 }
1030 else
1031 {
1032 AssertMsgFailed(("hmR0VmxSetMsrPermission: Invalid MSR %#RX32\n", uMsr));
1033 return;
1034 }
1035
1036 Assert(iBit <= 0x1fff);
1037 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
1038 ASMBitSet(pbMsrBitmap, iBit);
1039 else
1040 ASMBitClear(pbMsrBitmap, iBit);
1041
1042 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
1043 ASMBitSet(pbMsrBitmap + 0x800, iBit);
1044 else
1045 ASMBitClear(pbMsrBitmap + 0x800, iBit);
1046}
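/*
 * Illustrative sketch: a caller would typically make a frequently accessed MSR pass
 * through in both directions by clearing its read and write intercept bits, e.g.
 * (MSR constant assumed to come from the x86 headers included above):
 *
 *     hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS,
 *                             VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
 */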
1047
1048
1049/**
1050 * Flushes the TLB using EPT.
1051 *
1052 * @returns VBox status code.
1053 * @param pVM Pointer to the VM.
1054 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
1055 * enmFlush).
1056 * @param enmFlush Type of flush.
1057 */
1058static void hmR0VmxFlushEpt(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush)
1059{
1060 AssertPtr(pVM);
1061 Assert(pVM->hm.s.fNestedPaging);
1062
1063 uint64_t descriptor[2];
1064 if (enmFlush == VMX_FLUSH_EPT_ALL_CONTEXTS)
1065 descriptor[0] = 0;
1066 else
1067 {
1068 Assert(pVCpu);
1069 descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP;
1070 }
1071 descriptor[1] = 0; /* MBZ. Intel spec. 33.3 "VMX Instructions" */
1072
1073 int rc = VMXR0InvEPT(enmFlush, &descriptor[0]);
1074 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmFlush, pVCpu ? pVCpu->hm.s.vmx.HCPhysEPTP : 0,
1075 rc));
1076 if ( RT_SUCCESS(rc)
1077 && pVCpu)
1078 {
1079 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
1080 }
1081}
1082
1083
1084/**
1085 * Flushes the TLB using VPID.
1086 *
1087 * @returns VBox status code.
1088 * @param pVM Pointer to the VM.
1089 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
1090 * enmFlush).
1091 * @param enmFlush Type of flush.
1092 * @param GCPtr Virtual address of the page to flush (can be 0 depending
1093 * on @a enmFlush).
1094 */
1095static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr)
1096{
1097 AssertPtr(pVM);
1098 Assert(pVM->hm.s.vmx.fVpid);
1099
1100 uint64_t descriptor[2];
1101 if (enmFlush == VMX_FLUSH_VPID_ALL_CONTEXTS)
1102 {
1103 descriptor[0] = 0;
1104 descriptor[1] = 0;
1105 }
1106 else
1107 {
1108 AssertPtr(pVCpu);
1109 AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1110 AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1111 descriptor[0] = pVCpu->hm.s.uCurrentAsid;
1112 descriptor[1] = GCPtr;
1113 }
1114
1115 int rc = VMXR0InvVPID(enmFlush, &descriptor[0]); NOREF(rc);
1116 AssertMsg(rc == VINF_SUCCESS,
1117 ("VMXR0InvVPID %#x %u %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
1118 if ( RT_SUCCESS(rc)
1119 && pVCpu)
1120 {
1121 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
1122 }
1123}
1124
1125
1126/**
1127 * Invalidates a guest page by guest virtual address. Only relevant for
1128 * EPT/VPID, otherwise there is nothing really to invalidate.
1129 *
1130 * @returns VBox status code.
1131 * @param pVM Pointer to the VM.
1132 * @param pVCpu Pointer to the VMCPU.
1133 * @param GCVirt Guest virtual address of the page to invalidate.
1134 */
1135VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
1136{
1137 AssertPtr(pVM);
1138 AssertPtr(pVCpu);
1139 LogFlowFunc(("pVM=%p pVCpu=%p GCVirt=%RGv\n", pVM, pVCpu, GCVirt));
1140
1141 bool fFlushPending = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1142 if (!fFlushPending)
1143 {
1144 /*
1145 * We must invalidate the guest TLB entry in either case; we cannot ignore it even in the EPT case.
1146 * See @bugref{6043} and @bugref{6177}.
1147 *
1148 * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*() as this
1149 * function may be called in a loop with individual addresses.
1150 */
1151 if (pVM->hm.s.vmx.fVpid)
1152 {
1153 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1154 {
1155 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, GCVirt);
1156 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
1157 }
1158 else
1159 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1160 }
1161 else if (pVM->hm.s.fNestedPaging)
1162 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1163 }
1164
1165 return VINF_SUCCESS;
1166}
1167
1168
1169/**
1170 * Invalidates a guest page by physical address. Only relevant for EPT/VPID,
1171 * otherwise there is nothing really to invalidate.
1172 *
1173 * @returns VBox status code.
1174 * @param pVM Pointer to the VM.
1175 * @param pVCpu Pointer to the VMCPU.
1176 * @param GCPhys Guest physical address of the page to invalidate.
1177 */
1178VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
1179{
1180 LogFlowFunc(("%RGp\n", GCPhys));
1181
1182 /*
1183 * We cannot flush a page by guest-physical address: INVVPID takes only a linear address, while INVEPT flushes
1184 * by EPT context rather than by individual addresses. We set the force flag here and flush before the next VM-entry
1185 * in hmR0VmxFlushTLB*(), as this function might be called in a loop; this causes a flush-by-EPT if EPT is in use. See @bugref{6568}.
1186 */
1187 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1188 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys);
1189 return VINF_SUCCESS;
1190}
1191
1192
1193/**
1194 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
1195 * case where neither EPT nor VPID is supported by the CPU.
1196 *
1197 * @param pVM Pointer to the VM.
1198 * @param pVCpu Pointer to the VMCPU.
1199 *
1200 * @remarks Called with interrupts disabled.
1201 */
1202static void hmR0VmxFlushTaggedTlbNone(PVM pVM, PVMCPU pVCpu)
1203{
1204 NOREF(pVM);
1205 AssertPtr(pVCpu);
1206 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
1207 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1208
1209 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1210 AssertPtr(pCpu);
1211
1212 pVCpu->hm.s.TlbShootdown.cPages = 0;
1213 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1214 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1215 pVCpu->hm.s.fForceTLBFlush = false;
1216 return;
1217}
1218
1219
1220/**
1221 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
1222 *
1223 * @param pVM Pointer to the VM.
1224 * @param pVCpu Pointer to the VMCPU.
1225 * @remarks All references to "ASID" in this function pertain to "VPID" in
1226 * Intel's nomenclature. The reason is to avoid confusion in comparisons,
1227 * since the host-CPU copies are named "ASID".
1228 *
1229 * @remarks Called with interrupts disabled.
1230 */
1231static void hmR0VmxFlushTaggedTlbBoth(PVM pVM, PVMCPU pVCpu)
1232{
1233#ifdef VBOX_WITH_STATISTICS
1234 bool fTlbFlushed = false;
1235# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { fTlbFlushed = true; } while (0)
1236# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { \
1237 if (!fTlbFlushed) \
1238 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch); \
1239 } while (0)
1240#else
1241# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { } while (0)
1242# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { } while (0)
1243#endif
1244
1245 AssertPtr(pVM);
1246 AssertPtr(pVCpu);
1247 AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
1248 ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
1249 "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hm.s.fNestedPaging, pVM->hm.s.vmx.fVpid));
1250
1251 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1252 AssertPtr(pCpu);
1253
1254 /*
1255 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
1256 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
1257 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
1258 */
1259 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1260 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1261 {
1262 ++pCpu->uCurrentAsid;
1263 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1264 {
1265 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0. */
1266 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
1267 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
1268 }
1269
1270 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1271 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1272 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1273
1274 /*
1275 * Flush by EPT when we get rescheduled to a new host CPU to ensure EPT-only tagged mappings are also
1276 * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}.
1277 */
1278 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1279 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1280 HMVMX_SET_TAGGED_TLB_FLUSHED();
1281 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH); /* Already flushed-by-EPT, skip doing it again below. */
1282 }
1283
1284 /* Check for explicit TLB shootdowns. */
1285 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1286 {
1287 /*
1288 * Changes to the EPT paging structures by the VMM require flushing by EPT as the CPU creates
1289 * guest-physical (only EPT-tagged) mappings while traversing the EPT tables when EPT is in use.
1290 * Flushing by VPID will only flush linear (only VPID-tagged) and combined (EPT+VPID tagged) mappings
1291 * but not guest-physical mappings.
1292 * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information". See @bugref{6568}.
1293 */
1294 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1295 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1296 HMVMX_SET_TAGGED_TLB_FLUSHED();
1297 }
1298
1299 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1300 * not be executed. See hmQueueInvlPage() where it is commented
1301 * out. Support individual entry flushing someday. */
1302 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1303 {
1304 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
1305
1306 /*
1307 * Flush individual guest TLB entries using VPID, or as little as possible with EPT,
1308 * as supported by the CPU.
1309 */
1310 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1311 {
1312 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
1313 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
1314 }
1315 else
1316 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1317
1318 HMVMX_SET_TAGGED_TLB_FLUSHED();
1319 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1320 }
1321
1322 pVCpu->hm.s.TlbShootdown.cPages = 0;
1323 pVCpu->hm.s.fForceTLBFlush = false;
1324
1325 HMVMX_UPDATE_FLUSH_SKIPPED_STAT();
1326
1327 Assert(pVCpu->hm.s.idLastCpu == pCpu->idCpu);
1328 Assert(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes);
1329 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
1330 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
1331 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
1332 ("cpu%d uCurrentAsid = %u\n", pCpu->idCpu, pCpu->uCurrentAsid));
1333 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
1334 ("cpu%d VM uCurrentAsid = %u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
1335
1336 /* Update VMCS with the VPID. */
1337 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
1338 AssertRC(rc);
1339
1340#undef HMVMX_SET_TAGGED_TLB_FLUSHED
1341}
1342
1343
1344/**
1345 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
1346 *
1347 * @returns VBox status code.
1348 * @param pVM Pointer to the VM.
1349 * @param pVCpu Pointer to the VMCPU.
1350 *
1351 * @remarks Called with interrupts disabled.
1352 */
1353static void hmR0VmxFlushTaggedTlbEpt(PVM pVM, PVMCPU pVCpu)
1354{
1355 AssertPtr(pVM);
1356 AssertPtr(pVCpu);
1357 AssertMsg(pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with NestedPaging disabled."));
1358 AssertMsg(!pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID enabled."));
1359
1360 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1361 AssertPtr(pCpu);
1362
1363 /*
1364 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
1365 * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
1366 */
1367 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1368 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1369 {
1370 pVCpu->hm.s.fForceTLBFlush = true;
1371 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1372 }
1373
1374 /* Check for explicit TLB shootdown flushes. */
1375 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1376 {
1377 pVCpu->hm.s.fForceTLBFlush = true;
1378 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1379 }
1380
1381 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1382 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1383
1384 if (pVCpu->hm.s.fForceTLBFlush)
1385 {
1386 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1387 pVCpu->hm.s.fForceTLBFlush = false;
1388 }
1389 else
1390 {
1391 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1392 * not be executed. See hmQueueInvlPage() where it is commented
1393 * out. Support individual entry flushing someday. */
1394 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1395 {
1396 /* We cannot flush individual entries without VPID support. Flush using EPT. */
1397 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
1398 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1399 }
1400 else
1401 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1402 }
1403
1404 pVCpu->hm.s.TlbShootdown.cPages = 0;
1405 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1406}
1407
1408
1409/**
1410 * Flushes the tagged-TLB entries for VPID CPUs as necessary.
1411 *
1412 * @returns VBox status code.
1413 * @param pVM Pointer to the VM.
1414 * @param pVCpu Pointer to the VMCPU.
1415 *
1416 * @remarks Called with interrupts disabled.
1417 */
1418static void hmR0VmxFlushTaggedTlbVpid(PVM pVM, PVMCPU pVCpu)
1419{
1420 AssertPtr(pVM);
1421 AssertPtr(pVCpu);
1422 AssertMsg(pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTlbVpid cannot be invoked with VPID disabled."));
1423 AssertMsg(!pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTlbVpid cannot be invoked with NestedPaging enabled"));
1424
1425 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1426
1427 /*
1428 * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
1429 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
1430 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
1431 */
1432 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1433 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1434 {
1435 pVCpu->hm.s.fForceTLBFlush = true;
1436 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1437 }
1438
1439 /* Check for explicit TLB shootdown flushes. */
1440 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1441 {
1442 /*
1443 * If we ever support VPID flush combinations other than ALL or SINGLE-context (see hmR0VmxSetupTaggedTlb())
1444 * we would need to explicitly flush in this case (add an fExplicitFlush = true here and change the
1445 * pCpu->fFlushAsidBeforeUse check below to include fExplicitFlush too) - an obscure corner case.
1446 */
1447 pVCpu->hm.s.fForceTLBFlush = true;
1448 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1449 }
1450
1451 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1452 if (pVCpu->hm.s.fForceTLBFlush)
1453 {
1454 ++pCpu->uCurrentAsid;
1455 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1456 {
1457 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0 */
1458 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
1459 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
1460 }
1461
1462 pVCpu->hm.s.fForceTLBFlush = false;
1463 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1464 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1465 if (pCpu->fFlushAsidBeforeUse)
1466 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
1467 }
1468 else
1469 {
1470 AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
1471 ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
1472 pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
1473 pCpu->uCurrentAsid, pCpu->cTlbFlushes));
1474
1475 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1476 * not be executed. See hmQueueInvlPage() where it is commented
1477 * out. Support individual entry flushing someday. */
1478 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1479 {
1480 /* Flush individual guest entries using VPID or as little as possible with EPT as supported by the CPU. */
1481 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1482 {
1483 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
1484 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
1485 }
1486 else
1487 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
1488 }
1489 else
1490 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1491 }
1492
1493 pVCpu->hm.s.TlbShootdown.cPages = 0;
1494 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1495
1496 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
1497 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
1498 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
1499 ("cpu%d uCurrentAsid = %u\n", pCpu->idCpu, pCpu->uCurrentAsid));
1500 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
1501 ("cpu%d VM uCurrentAsid = %u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
1502
1503 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
1504 AssertRC(rc);
1505}
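/*
 * Illustrative note on the ASID recycling in hmR0VmxFlushTaggedTlbVpid() above (numbers are hypothetical):
 * with uMaxAsid = 8, the host CPU hands out ASIDs 1..7 as VCPUs get scheduled (0 is reserved for the host).
 * When the next assignment would reach uMaxAsid, uCurrentAsid wraps back to 1, cTlbFlushes is bumped so that
 * every VCPU subsequently running on this host CPU notices the generation change, and fFlushAsidBeforeUse
 * forces an INVVPID before the recycled ASID is trusted again.
 */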
1506
1507
1508/**
1509 * Flushes the guest TLB entry based on CPU capabilities.
1510 *
1511 * @param pVCpu Pointer to the VMCPU.
1512 */
1513DECLINLINE(void) hmR0VmxFlushTaggedTlb(PVMCPU pVCpu)
1514{
1515 PVM pVM = pVCpu->CTX_SUFF(pVM);
1516 switch (pVM->hm.s.vmx.uFlushTaggedTlb)
1517 {
1518 case HMVMX_FLUSH_TAGGED_TLB_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVM, pVCpu); break;
1519 case HMVMX_FLUSH_TAGGED_TLB_EPT: hmR0VmxFlushTaggedTlbEpt(pVM, pVCpu); break;
1520 case HMVMX_FLUSH_TAGGED_TLB_VPID: hmR0VmxFlushTaggedTlbVpid(pVM, pVCpu); break;
1521 case HMVMX_FLUSH_TAGGED_TLB_NONE: hmR0VmxFlushTaggedTlbNone(pVM, pVCpu); break;
1522 default:
1523 AssertMsgFailed(("Invalid flush-tag function identifier\n"));
1524 break;
1525 }
1526}
1527
1528
1529/**
1530 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
1531 * TLB entries from the host TLB before VM-entry.
1532 *
1533 * @returns VBox status code.
1534 * @param pVM Pointer to the VM.
1535 */
1536static int hmR0VmxSetupTaggedTlb(PVM pVM)
1537{
1538 /*
1539 * Determine optimal flush type for Nested Paging.
1540     * We cannot ignore EPT if no suitable flush type is supported by the CPU as we've already set up unrestricted
1541     * guest execution (see hmR3InitFinalizeR0()).
1542 */
1543 if (pVM->hm.s.fNestedPaging)
1544 {
1545 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
1546 {
1547 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
1548 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_SINGLE_CONTEXT;
1549 else if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
1550 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_ALL_CONTEXTS;
1551 else
1552 {
1553 /* Shouldn't happen. EPT is supported but no suitable flush-types supported. */
1554 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1555 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1556 }
1557
1558 /* Make sure the write-back cacheable memory type for EPT is supported. */
1559 if (!(pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB))
1560 {
1561 LogRel(("hmR0VmxSetupTaggedTlb: Unsupported EPTP memory type %#x.\n", pVM->hm.s.vmx.msr.vmx_ept_vpid_caps));
1562 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1563 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1564 }
1565 }
1566 else
1567 {
1568 /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
1569 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1570 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1571 }
1572 }
1573
1574 /*
1575 * Determine optimal flush type for VPID.
1576 */
1577 if (pVM->hm.s.vmx.fVpid)
1578 {
1579 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
1580 {
1581 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
1582 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_SINGLE_CONTEXT;
1583 else if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
1584 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_ALL_CONTEXTS;
1585 else
1586 {
1587                 /* Neither SINGLE nor ALL-context flush types for VPID are supported by the CPU. Ignore VPID capability. */
1588 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1589 LogRel(("hmR0VmxSetupTaggedTlb: Only INDIV_ADDR supported. Ignoring VPID.\n"));
1590 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
1591 LogRel(("hmR0VmxSetupTaggedTlb: Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
1592 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
1593 pVM->hm.s.vmx.fVpid = false;
1594 }
1595 }
1596 else
1597 {
1598 /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
1599             Log4(("hmR0VmxSetupTaggedTlb: VPID supported without INVVPID support. Ignoring VPID.\n"));
1600 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
1601 pVM->hm.s.vmx.fVpid = false;
1602 }
1603 }
1604
1605 /*
1606 * Setup the handler for flushing tagged-TLBs.
1607 */
1608 if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
1609 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT_VPID;
1610 else if (pVM->hm.s.fNestedPaging)
1611 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT;
1612 else if (pVM->hm.s.vmx.fVpid)
1613 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_VPID;
1614 else
1615 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_NONE;
1616 return VINF_SUCCESS;
1617}
1618
1619
1620/**
1621 * Sets up pin-based VM-execution controls in the VMCS.
1622 *
1623 * @returns VBox status code.
1624 * @param pVM Pointer to the VM.
1625 * @param pVCpu Pointer to the VMCPU.
1626 */
1627static int hmR0VmxSetupPinCtls(PVM pVM, PVMCPU pVCpu)
1628{
1629 AssertPtr(pVM);
1630 AssertPtr(pVCpu);
1631
1632 uint32_t val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0; /* Bits set here must always be set. */
1633 uint32_t zap = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1; /* Bits cleared here must always be cleared. */
1634
1635    val |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT           /* External interrupts cause VM-exits. */
1636           | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT;            /* Non-maskable interrupts (NMIs) cause a VM-exit. */
1637 Assert(!(val & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI));
1638
1639 /* Enable the VMX preemption timer. */
1640 if (pVM->hm.s.vmx.fUsePreemptTimer)
1641 {
1642 Assert(pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
1643 val |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER;
1644 }
1645
1646 if ((val & zap) != val)
1647 {
1648 LogRel(("hmR0VmxSetupPinCtls: invalid pin-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
1649 pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0, val, zap));
1650 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1651 }
1652
1653 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, val);
1654 AssertRCReturn(rc, rc);
1655
1656 /* Update VCPU with the currently set pin-based VM-execution controls. */
1657 pVCpu->hm.s.vmx.u32PinCtls = val;
1658 return rc;
1659}
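/*
 * Worked example of the val/zap consistency check used above (and in the other control-setup routines); the
 * MSR-derived values here are made up for illustration. Suppose disallowed0 = 0x16 (bits that must be 1) and
 * allowed1 = 0x3f (bits that may be 1). Starting from val = 0x16 and OR-ing in two supported feature bits,
 * 0x01 and 0x08, gives val = 0x1f and (val & zap) == val, so the combination is accepted. Requesting an
 * unsupported bit such as 0x40 would give val = 0x5f, but (val & zap) = 0x1f != val, and the function fails
 * with VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO.
 */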
1660
1661
1662/**
1663 * Sets up processor-based VM-execution controls in the VMCS.
1664 *
1665 * @returns VBox status code.
1666 * @param pVM Pointer to the VM.
1667 * @param pVCpu        Pointer to the VMCPU.
1668 */
1669static int hmR0VmxSetupProcCtls(PVM pVM, PVMCPU pVCpu)
1670{
1671 AssertPtr(pVM);
1672 AssertPtr(pVCpu);
1673
1674 int rc = VERR_INTERNAL_ERROR_5;
1675 uint32_t val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0; /* Bits set here must be set in the VMCS. */
1676 uint32_t zap = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1677
1678 val |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT /* HLT causes a VM-exit. */
1679 | VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
1680 | VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
1681 | VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
1682 | VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT /* RDPMC causes a VM-exit. */
1683 | VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT /* MONITOR causes a VM-exit. */
1684 | VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
1685
1686    /* We toggle VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT later; check that the CPU doesn't require it to be -always- set or -always- clear. */
1687 if ( !(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT)
1688 || (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))
1689 {
1690 LogRel(("hmR0VmxSetupProcCtls: unsupported VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT combo!"));
1691 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1692 }
1693
1694 /* Without Nested Paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
1695 if (!pVM->hm.s.fNestedPaging)
1696 {
1697 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); /* Paranoia. */
1698 val |= VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT
1699 | VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
1700 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
1701 }
1702
1703 /* Use TPR shadowing if supported by the CPU. */
1704 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
1705 {
1706 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
1707 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */
1708 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
1709 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
1710 AssertRCReturn(rc, rc);
1711
1712 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
1713        /* CR8 writes cause a VM-exit based on the TPR threshold. */
1714 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT));
1715 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT));
1716 }
1717 else
1718 {
1719        val |=   VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT  /* CR8 reads cause a VM-exit. */
1720               | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT;  /* CR8 writes cause a VM-exit. */
1721 }
1722
1723 /* Use MSR-bitmaps if supported by the CPU. */
1724 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1725 {
1726 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS;
1727
1728 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1729 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */
1730 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1731 AssertRCReturn(rc, rc);
1732
1733 /*
1734 * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored
1735 * automatically (either as part of the MSR-load/store areas or dedicated fields in the VMCS).
1736 */
1737 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1738 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1739 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1740 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1741 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1742 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1743 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1744 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1745 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1746 }
1747
1748 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
1749 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1750 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
1751
1752 if ((val & zap) != val)
1753 {
1754 LogRel(("hmR0VmxSetupProcCtls: invalid processor-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
1755 pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0, val, zap));
1756 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1757 }
1758
1759 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, val);
1760 AssertRCReturn(rc, rc);
1761
1762 /* Update VCPU with the currently set processor-based VM-execution controls. */
1763 pVCpu->hm.s.vmx.u32ProcCtls = val;
1764
1765 /*
1766 * Secondary processor-based VM-execution controls.
1767 */
1768 if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL))
1769 {
1770 val = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0; /* Bits set here must be set in the VMCS. */
1771 zap = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1772
1773 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
1774 val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT; /* WBINVD causes a VM-exit. */
1775
1776 if (pVM->hm.s.fNestedPaging)
1777 val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT; /* Enable EPT. */
1778 else
1779 {
1780 /*
1781 * Without Nested Paging, INVPCID should cause a VM-exit. Enabling this bit causes the CPU to refer to
1782 * VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT when INVPCID is executed by the guest.
1783 * See Intel spec. 25.4 "Changes to instruction behaviour in VMX non-root operation".
1784 */
1785 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
1786 val |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
1787 }
1788
1789 if (pVM->hm.s.vmx.fVpid)
1790 val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID; /* Enable VPID. */
1791
1792 if (pVM->hm.s.vmx.fUnrestrictedGuest)
1793 val |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST; /* Enable Unrestricted Execution. */
1794
1795 /* Enable Virtual-APIC page accesses if supported by the CPU. This is essentially where the TPR shadow resides. */
1796 /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
1797 * done dynamically. */
1798 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
1799 {
1800 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
1801 Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */
1802 val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC; /* Virtualize APIC accesses. */
1803 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
1804 AssertRCReturn(rc, rc);
1805 }
1806
1807 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
1808 {
1809 val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP; /* Enable RDTSCP support. */
1810 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1811 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1812 }
1813
1814 if ((val & zap) != val)
1815 {
1816 LogRel(("hmR0VmxSetupProcCtls: invalid secondary processor-based VM-execution controls combo! "
1817 "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0, val, zap));
1818 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1819 }
1820
1821 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, val);
1822 AssertRCReturn(rc, rc);
1823
1824 /* Update VCPU with the currently set secondary processor-based VM-execution controls. */
1825 pVCpu->hm.s.vmx.u32ProcCtls2 = val;
1826 }
1827
1828 return VINF_SUCCESS;
1829}
1830
1831
1832/**
1833 * Sets up miscellaneous (everything other than Pin & Processor-based
1834 * VM-execution) control fields in the VMCS.
1835 *
1836 * @returns VBox status code.
1837 * @param pVM Pointer to the VM.
1838 * @param pVCpu Pointer to the VMCPU.
1839 */
1840static int hmR0VmxSetupMiscCtls(PVM pVM, PVMCPU pVCpu)
1841{
1842 AssertPtr(pVM);
1843 AssertPtr(pVCpu);
1844
1845 int rc = VERR_GENERAL_FAILURE;
1846
1847 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
1848#if 0
1849 /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxLoadGuestControlRegs())*/
1850 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0); AssertRCReturn(rc, rc);
1851 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0); AssertRCReturn(rc, rc);
1852
1853 /*
1854 * Set MASK & MATCH to 0. VMX checks if GuestPFErrCode & MASK == MATCH. If equal (in our case it always is)
1855 * and if the X86_XCPT_PF bit in the exception bitmap is set it causes a VM-exit, if clear doesn't cause an exit.
1856 * We thus use the exception bitmap to control it rather than use both.
1857 */
1858 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0); AssertRCReturn(rc, rc);
1859 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0); AssertRCReturn(rc, rc);
1860
1861 /** @todo Explore possibility of using IO-bitmaps. */
1862 /* All IO & IOIO instructions cause VM-exits. */
1863 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0); AssertRCReturn(rc, rc);
1864 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0); AssertRCReturn(rc, rc);
1865
1866 /* Initialize the MSR-bitmap area. */
1867 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0); AssertRCReturn(rc, rc);
1868 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0); AssertRCReturn(rc, rc);
1869 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0); AssertRCReturn(rc, rc);
1870#endif
1871
1872#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
1873 /* Setup MSR autoloading/storing. */
1874 Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
1875 Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf)); /* Lower 4 bits MBZ. */
1876 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
1877 AssertRCReturn(rc, rc);
1878 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
1879 AssertRCReturn(rc, rc);
1880
1881 Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
1882 Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf)); /* Lower 4 bits MBZ. */
1883 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
1884 AssertRCReturn(rc, rc);
1885#endif
1886
1887 /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
1888 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, UINT64_C(0xffffffffffffffff));
1889 AssertRCReturn(rc, rc);
1890
1891 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
1892#if 0
1893 /* Setup debug controls */
1894 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0); /** @todo We don't support IA32_DEBUGCTL MSR. Should we? */
1895 AssertRCReturn(rc, rc);
1896 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
1897 AssertRCReturn(rc, rc);
1898#endif
1899
1900 return rc;
1901}
1902
1903
1904/**
1905 * Sets up the initial exception bitmap in the VMCS based on static conditions
1906 * (i.e. conditions that cannot ever change at runtime).
1907 *
1908 * @returns VBox status code.
1909 * @param pVM Pointer to the VM.
1910 * @param pVCpu Pointer to the VMCPU.
1911 */
1912static int hmR0VmxInitXcptBitmap(PVM pVM, PVMCPU pVCpu)
1913{
1914 AssertPtr(pVM);
1915 AssertPtr(pVCpu);
1916
1917 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
1918
1919 uint32_t u32XcptBitmap = 0;
1920
1921 /* Without Nested Paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
1922 if (!pVM->hm.s.fNestedPaging)
1923 u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
1924
1925 pVCpu->hm.s.vmx.u32XcptBitmap = u32XcptBitmap;
1926 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
1927 AssertRCReturn(rc, rc);
1928 return rc;
1929}
1930
1931
1932/**
1933 * Sets up the initial guest-state mask. The guest-state mask is consulted
1934 * before reading guest-state fields from the VMCS as VMREADs can be expensive
1935 * for the nested virtualization case (as it would cause a VM-exit).
1936 *
1937 * @param pVCpu Pointer to the VMCPU.
1938 */
1939static int hmR0VmxInitUpdatedGuestStateMask(PVMCPU pVCpu)
1940{
1941 /* Initially the guest-state is up-to-date as there is nothing in the VMCS. */
1942 pVCpu->hm.s.vmx.fUpdatedGuestState = HMVMX_UPDATED_GUEST_ALL;
1943 return VINF_SUCCESS;
1944}
1945
1946
1947/**
1948 * Does per-VM VT-x initialization.
1949 *
1950 * @returns VBox status code.
1951 * @param pVM Pointer to the VM.
1952 */
1953VMMR0DECL(int) VMXR0InitVM(PVM pVM)
1954{
1955 LogFlowFunc(("pVM=%p\n", pVM));
1956
1957 int rc = hmR0VmxStructsAlloc(pVM);
1958 if (RT_FAILURE(rc))
1959 {
1960 LogRel(("VMXR0InitVM: hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
1961 return rc;
1962 }
1963
1964 return VINF_SUCCESS;
1965}
1966
1967
1968/**
1969 * Does per-VM VT-x termination.
1970 *
1971 * @returns VBox status code.
1972 * @param pVM Pointer to the VM.
1973 */
1974VMMR0DECL(int) VMXR0TermVM(PVM pVM)
1975{
1976 LogFlowFunc(("pVM=%p\n", pVM));
1977
1978#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1979 if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ)
1980 ASMMemZero32(pVM->hm.s.vmx.pvScratch, PAGE_SIZE);
1981#endif
1982 hmR0VmxStructsFree(pVM);
1983 return VINF_SUCCESS;
1984}
1985
1986
1987/**
1988 * Sets up the VM for execution under VT-x.
1989 * This function is only called once per VM during initialization.
1990 *
1991 * @returns VBox status code.
1992 * @param pVM Pointer to the VM.
1993 */
1994VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
1995{
1996 AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
1997 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1998
1999 LogFlowFunc(("pVM=%p\n", pVM));
2000
2001 /*
2002 * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be allocated.
2003 * We no longer support the highly unlikely case of UnrestrictedGuest without pRealModeTSS. See hmR3InitFinalizeR0().
2004 */
2005 /* -XXX- change hmR3InitFinalizeR0Intel() to fail if pRealModeTSS alloc fails. */
2006 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
2007 && ( !pVM->hm.s.vmx.pNonPagingModeEPTPageTable
2008 || !pVM->hm.s.vmx.pRealModeTSS))
2009 {
2010 LogRel(("VMXR0SetupVM: invalid real-on-v86 state.\n"));
2011 return VERR_INTERNAL_ERROR;
2012 }
2013
2014 /* Initialize these always, see hmR3InitFinalizeR0().*/
2015 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NONE;
2016 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NONE;
2017
2018 /* Setup the tagged-TLB flush handlers. */
2019 int rc = hmR0VmxSetupTaggedTlb(pVM);
2020 if (RT_FAILURE(rc))
2021 {
2022 LogRel(("VMXR0SetupVM: hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
2023 return rc;
2024 }
2025
2026 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2027 {
2028 PVMCPU pVCpu = &pVM->aCpus[i];
2029 AssertPtr(pVCpu);
2030 AssertPtr(pVCpu->hm.s.vmx.pvVmcs);
2031
2032 /* Log the VCPU pointers, useful for debugging SMP VMs. */
2033 Log4(("VMXR0SetupVM: pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
2034
2035 /* Set revision dword at the beginning of the VMCS structure. */
2036 *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
2037
2038 /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */
2039 rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
2040 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVMCS failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2041 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2042
2043 /* Load this VMCS as the current VMCS. */
2044 rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
2045 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVMCS failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2046 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2047
2048 rc = hmR0VmxSetupPinCtls(pVM, pVCpu);
2049 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2050 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2051
2052 rc = hmR0VmxSetupProcCtls(pVM, pVCpu);
2053 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2054 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2055
2056 rc = hmR0VmxSetupMiscCtls(pVM, pVCpu);
2057 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2058 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2059
2060 rc = hmR0VmxInitXcptBitmap(pVM, pVCpu);
2061 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2062 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2063
2064 rc = hmR0VmxInitUpdatedGuestStateMask(pVCpu);
2065 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitUpdatedGuestStateMask failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2066 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2067
2068#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2069 rc = hmR0VmxInitVmcsReadCache(pVM, pVCpu);
2070 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2071 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2072#endif
2073
2074 /* Re-sync the CPU's internal data into our VMCS memory region & reset the launch state to "clear". */
2075 rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
2076 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVMCS(2) failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2077 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2078
2079 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc);
2080 }
2081
2082 return VINF_SUCCESS;
2083}
2084
2085
2086/**
2087 * Saves the host control registers (CR0, CR3, CR4) into the host-state area in
2088 * the VMCS.
2089 *
2090 * @returns VBox status code.
2091 * @param pVM Pointer to the VM.
2092 * @param pVCpu Pointer to the VMCPU.
2093 */
2094DECLINLINE(int) hmR0VmxSaveHostControlRegs(PVM pVM, PVMCPU pVCpu)
2095{
2096 RTCCUINTREG uReg = ASMGetCR0();
2097 int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
2098 AssertRCReturn(rc, rc);
2099
2100#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2101 /* For the darwin 32-bit hybrid kernel, we need the 64-bit CR3 as it uses 64-bit paging. */
2102 if (HMVMX_IS_64BIT_HOST_MODE())
2103 {
2104 uint64_t uRegCR3 = HMR0Get64bitCR3();
2105 rc = VMXWriteVmcs64(VMX_VMCS_HOST_CR3, uRegCR3);
2106 }
2107 else
2108#endif
2109 {
2110 uReg = ASMGetCR3();
2111 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
2112 }
2113 AssertRCReturn(rc, rc);
2114
2115 uReg = ASMGetCR4();
2116 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
2117 AssertRCReturn(rc, rc);
2118 return rc;
2119}
2120
2121
2122/**
2123 * Saves the host segment registers and GDTR, IDTR, (TR, GS and FS bases) into
2124 * the host-state area in the VMCS.
2125 *
2126 * @returns VBox status code.
2127 * @param pVM Pointer to the VM.
2128 * @param pVCpu Pointer to the VMCPU.
2129 */
2130DECLINLINE(int) hmR0VmxSaveHostSegmentRegs(PVM pVM, PVMCPU pVCpu)
2131{
2132 int rc = VERR_INTERNAL_ERROR_5;
2133 RTSEL uSelDS = 0;
2134 RTSEL uSelES = 0;
2135 RTSEL uSelFS = 0;
2136 RTSEL uSelGS = 0;
2137 RTSEL uSelTR = 0;
2138
2139 /*
2140 * Host DS, ES, FS and GS segment registers.
2141 */
2142#if HC_ARCH_BITS == 64
2143 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
2144 uSelDS = ASMGetDS();
2145 uSelES = ASMGetES();
2146 uSelFS = ASMGetFS();
2147 uSelGS = ASMGetGS();
2148#endif
2149
2150 /*
2151 * Host CS and SS segment registers.
2152 */
2153 RTSEL uSelCS;
2154 RTSEL uSelSS;
2155#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2156 if (HMVMX_IS_64BIT_HOST_MODE())
2157 {
2158 uSelCS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelCS;
2159 uSelSS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelSS;
2160 }
2161 else
2162 {
2163 /* Seems darwin uses the LDT (TI flag is set) in the CS & SS selectors which VT-x doesn't like. */
2164 uSelCS = (RTSEL)(uintptr_t)&SUPR0AbsKernelCS;
2165 uSelSS = (RTSEL)(uintptr_t)&SUPR0AbsKernelSS;
2166 }
2167#else
2168 uSelCS = ASMGetCS();
2169 uSelSS = ASMGetSS();
2170#endif
2171
2172 /*
2173 * Host TR segment register.
2174 */
2175 uSelTR = ASMGetTR();
2176
2177#if HC_ARCH_BITS == 64
2178 /*
2179     * Determine if the host segment registers are suitable for VT-x. Otherwise load zero (a NULL selector) so that VM-entry
2180     * succeeds, and restore the original selectors before we get preempted. See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
2181 */
2182 if (uSelDS & (X86_SEL_RPL | X86_SEL_LDT))
2183 {
2184 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_DS;
2185 pVCpu->hm.s.vmx.RestoreHost.uHostSelDS = uSelDS;
2186 uSelDS = 0;
2187 }
2188 if (uSelES & (X86_SEL_RPL | X86_SEL_LDT))
2189 {
2190 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_ES;
2191 pVCpu->hm.s.vmx.RestoreHost.uHostSelES = uSelES;
2192 uSelES = 0;
2193 }
2194 if (uSelFS & (X86_SEL_RPL | X86_SEL_LDT))
2195 {
2196 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_FS;
2197 pVCpu->hm.s.vmx.RestoreHost.uHostSelFS = uSelFS;
2198 uSelFS = 0;
2199 }
2200 if (uSelGS & (X86_SEL_RPL | X86_SEL_LDT))
2201 {
2202 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_GS;
2203 pVCpu->hm.s.vmx.RestoreHost.uHostSelGS = uSelGS;
2204 uSelGS = 0;
2205 }
2206#endif
2207
2208 /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
2209 Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
2210 Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));
2211 Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
2212 Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
2213 Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT));
2214 Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT));
2215 Assert(!(uSelTR & X86_SEL_RPL)); Assert(!(uSelTR & X86_SEL_LDT));
2216 Assert(uSelCS);
2217 Assert(uSelTR);
2218
2219 /* Assertion is right but we would not have updated u32ExitCtls yet. */
2220#if 0
2221 if (!(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE))
2222 Assert(uSelSS != 0);
2223#endif
2224
2225 /* Write these host selector fields into the host-state area in the VMCS. */
2226 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS); AssertRCReturn(rc, rc);
2227 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS); AssertRCReturn(rc, rc);
2228#if HC_ARCH_BITS == 64
2229 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS); AssertRCReturn(rc, rc);
2230 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES); AssertRCReturn(rc, rc);
2231 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_FS, uSelFS); AssertRCReturn(rc, rc);
2232 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_GS, uSelGS); AssertRCReturn(rc, rc);
2233#endif
2234 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_TR, uSelTR); AssertRCReturn(rc, rc);
2235
2236 /*
2237 * Host GDTR and IDTR.
2238 */
2239 RTGDTR Gdtr;
2240 RT_ZERO(Gdtr);
2241#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2242 if (HMVMX_IS_64BIT_HOST_MODE())
2243 {
2244 X86XDTR64 Gdtr64;
2245 X86XDTR64 Idtr64;
2246 HMR0Get64bitGdtrAndIdtr(&Gdtr64, &Idtr64);
2247 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GDTR_BASE, Gdtr64.uAddr); AssertRCReturn(rc, rc);
2248 rc = VMXWriteVmcs64(VMX_VMCS_HOST_IDTR_BASE, Idtr64.uAddr); AssertRCReturn(rc, rc);
2249
2250 Gdtr.cbGdt = Gdtr64.cb;
2251 Gdtr.pGdt = (uintptr_t)Gdtr64.uAddr;
2252 }
2253 else
2254#endif
2255 {
2256 RTIDTR Idtr;
2257 ASMGetGDTR(&Gdtr);
2258 ASMGetIDTR(&Idtr);
2259 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt); AssertRCReturn(rc, rc);
2260 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt); AssertRCReturn(rc, rc);
2261
2262#if HC_ARCH_BITS == 64
2263 /*
2264         * Determine if we need to manually restore the GDTR and IDTR limits as VT-x zaps them to the
2265 * maximum limit (0xffff) on every VM-exit.
2266 */
2267 if (Gdtr.cbGdt != 0xffff)
2268 {
2269 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;
2270 AssertCompile(sizeof(Gdtr) == sizeof(X86XDTR64));
2271 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
2272 }
2273
2274 /*
2275         * The IDT limit is effectively at most 0xfff (256 16-byte gates). Therefore, if the host reports the limit as 0xfff,
2276         * VT-x bloating the limit to 0xffff on VM-exit is not a problem as the extra range cannot be reached anyway.
2277         * See Intel spec. 6.14.1 "64-Bit Mode IDT" and Intel spec. 6.2 "Exception and Interrupt Vectors".
2278 */
2279 if (Idtr.cbIdt < 0x0fff)
2280 {
2281 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
2282 AssertCompile(sizeof(Idtr) == sizeof(X86XDTR64));
2283 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostIdtr, &Idtr, sizeof(X86XDTR64));
2284 }
2285#endif
2286 }
2287
2288 /*
2289     * Host TR base. Verify that the TR selector doesn't point past the GDT. Masking off the TI and RPL bits yields the byte
2290     * offset into the GDT (selector index scaled by 8), which is effectively what the CPU does. TI is always 0 and RPL should be too in most cases.
2291 */
2292 if ((uSelTR & X86_SEL_MASK) > Gdtr.cbGdt)
2293 {
2294 AssertMsgFailed(("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt));
2295 return VERR_VMX_INVALID_HOST_STATE;
2296 }
2297
2298 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
2299#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2300 if (HMVMX_IS_64BIT_HOST_MODE())
2301 {
2302 /* We need the 64-bit TR base for hybrid darwin. */
2303 uint64_t u64TRBase = X86DESC64_BASE((PX86DESC64)pDesc);
2304 rc = VMXWriteVmcs64(VMX_VMCS_HOST_TR_BASE, u64TRBase);
2305 }
2306 else
2307#endif
2308 {
2309 uintptr_t uTRBase;
2310#if HC_ARCH_BITS == 64
2311 uTRBase = X86DESC64_BASE(pDesc);
2312#else
2313 uTRBase = X86DESC_BASE(pDesc);
2314#endif
2315 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
2316 }
2317 AssertRCReturn(rc, rc);
2318
2319 /*
2320 * Host FS base and GS base.
2321 */
2322#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2323 if (HMVMX_IS_64BIT_HOST_MODE())
2324 {
2325 uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
2326 uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
2327 rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase); AssertRCReturn(rc, rc);
2328 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase); AssertRCReturn(rc, rc);
2329
2330# if HC_ARCH_BITS == 64
2331 /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */
2332 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS)
2333 pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = u64FSBase;
2334 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_GS)
2335 pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase;
2336# endif
2337 }
2338#endif
2339 return rc;
2340}
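/*
 * Illustrative example for the TR-base lookup in hmR0VmxSaveHostSegmentRegs() above (the selector value is
 * hypothetical): uSelTR = 0x0040 has TI = 0 and RPL = 0, and masking with X86_SEL_MASK leaves 0x40, i.e.
 * GDT index 8 scaled by 8 bytes. pDesc then points at Gdtr.pGdt + 0x40 and X86DESC64_BASE() reassembles the
 * scattered base-address fields of the (16-byte, on 64-bit hosts) TSS descriptor into the value written to
 * VMX_VMCS_HOST_TR_BASE.
 */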
2341
2342
2343/**
2344 * Saves certain host MSRs in the VM-Exit MSR-load area and some in the
2345 * host-state area of the VMCS. These MSRs will be automatically restored on
2346 * the host after every successful VM-exit.
2347 *
2348 * @returns VBox status code.
2349 * @param pVM Pointer to the VM.
2350 * @param pVCpu Pointer to the VMCPU.
2351 */
2352DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu)
2353{
2354 AssertPtr(pVCpu);
2355 AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
2356
2357 int rc = VINF_SUCCESS;
2358#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
2359 PVMXMSR pHostMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvHostMsr;
2360 uint32_t cHostMsrs = 0;
2361 uint32_t u32HostExtFeatures = pVM->hm.s.cpuid.u32AMDFeatureEDX;
2362
2363 if (u32HostExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
2364 {
2365 uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
2366
2367# if HC_ARCH_BITS == 64
2368 /* Paranoia. 64-bit code requires these bits to be set always. */
2369 Assert((u64HostEfer & (MSR_K6_EFER_LMA | MSR_K6_EFER_LME)) == (MSR_K6_EFER_LMA | MSR_K6_EFER_LME));
2370
2371 /*
2372 * We currently do not save/restore host EFER, we just make sure it doesn't get modified by VT-x operation.
2373 * All guest accesses (read, write) on EFER cause VM-exits. If we are to conditionally load the guest EFER for
2374 * some reason (e.g. allow transparent reads) we would activate the code below.
2375 */
2376# if 0
2377 /* All our supported 64-bit host platforms must have NXE bit set. Otherwise we can change the below code to save EFER. */
2378 Assert(u64HostEfer & (MSR_K6_EFER_NXE));
2379 /* The SCE bit is only applicable in 64-bit mode. Save EFER if it doesn't match what the guest has.
2380 See Intel spec. 30.10.4.3 "Handling the SYSCALL and SYSRET Instructions". */
2381 if (CPUMIsGuestInLongMode(pVCpu))
2382 {
2383 uint64_t u64GuestEfer;
2384 rc = CPUMQueryGuestMsr(pVCpu, MSR_K6_EFER, &u64GuestEfer);
2385 AssertRC(rc);
2386
2387 if ((u64HostEfer & MSR_K6_EFER_SCE) != (u64GuestEfer & MSR_K6_EFER_SCE))
2388 {
2389 pHostMsr->u32IndexMSR = MSR_K6_EFER;
2390 pHostMsr->u32Reserved = 0;
2391 pHostMsr->u64Value = u64HostEfer;
2392 pHostMsr++; cHostMsrs++;
2393 }
2394 }
2395# endif
2396# else /* HC_ARCH_BITS != 64 */
2397 pHostMsr->u32IndexMSR = MSR_K6_EFER;
2398 pHostMsr->u32Reserved = 0;
2399# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2400 if (CPUMIsGuestInLongMode(pVCpu))
2401 {
2402         /* Must match the EFER value in our 64-bit switcher. */
2403 pHostMsr->u64Value = u64HostEfer | MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_NXE;
2404 }
2405 else
2406# endif
2407 pHostMsr->u64Value = u64HostEfer;
2408 pHostMsr++; cHostMsrs++;
2409# endif /* HC_ARCH_BITS == 64 */
2410 }
2411
2412# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2413 if (HMVMX_IS_64BIT_HOST_MODE())
2414 {
2415 pHostMsr->u32IndexMSR = MSR_K6_STAR;
2416 pHostMsr->u32Reserved = 0;
2417 pHostMsr->u64Value = ASMRdMsr(MSR_K6_STAR); /* legacy syscall eip, cs & ss */
2418 pHostMsr++; cHostMsrs++;
2419 pHostMsr->u32IndexMSR = MSR_K8_LSTAR;
2420 pHostMsr->u32Reserved = 0;
2421 pHostMsr->u64Value = ASMRdMsr(MSR_K8_LSTAR); /* 64-bit mode syscall rip */
2422 pHostMsr++; cHostMsrs++;
2423 pHostMsr->u32IndexMSR = MSR_K8_SF_MASK;
2424 pHostMsr->u32Reserved = 0;
2425 pHostMsr->u64Value = ASMRdMsr(MSR_K8_SF_MASK); /* syscall flag mask */
2426 pHostMsr++; cHostMsrs++;
2427 pHostMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
2428 pHostMsr->u32Reserved = 0;
2429 pHostMsr->u64Value = ASMRdMsr(MSR_K8_KERNEL_GS_BASE); /* swapgs exchange value */
2430 pHostMsr++; cHostMsrs++;
2431 }
2432# endif
2433
2434     /* Shouldn't ever happen, but there -is- a limit; we're well within the recommended maximum of 512 MSRs. */
2435 if (RT_UNLIKELY(cHostMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)))
2436 {
2437 LogRel(("cHostMsrs=%u Cpu=%u\n", cHostMsrs, (unsigned)MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)));
2438 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2439 }
2440
2441 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs);
2442#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
2443
2444 /*
2445 * Host Sysenter MSRs.
2446 */
2447 rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
2448 AssertRCReturn(rc, rc);
2449#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2450 if (HMVMX_IS_64BIT_HOST_MODE())
2451 {
2452 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
2453 AssertRCReturn(rc, rc);
2454 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
2455 }
2456 else
2457 {
2458 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
2459 AssertRCReturn(rc, rc);
2460 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
2461 }
2462#elif HC_ARCH_BITS == 32
2463 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
2464 AssertRCReturn(rc, rc);
2465 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
2466#else
2467 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
2468 AssertRCReturn(rc, rc);
2469 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
2470#endif
2471 AssertRCReturn(rc, rc);
2472
2473 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT, IA32_EFER, also see
2474 * hmR0VmxSetupExitCtls() !! */
2475 return rc;
2476}
2477
2478
2479/**
2480 * Sets up VM-entry controls in the VMCS. These controls can affect things done
2481 * on VM-exit; e.g. "load debug controls", see Intel spec. 24.8.1 "VM-entry
2482 * controls".
2483 *
2484 * @returns VBox status code.
2485 * @param pVCpu Pointer to the VMCPU.
2486 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2487 * out-of-sync. Make sure to update the required fields
2488 * before using them.
2489 *
2490 * @remarks No-long-jump zone!!!
2491 */
2492DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2493{
2494 int rc = VINF_SUCCESS;
2495 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_ENTRY_CTLS)
2496 {
2497 PVM pVM = pVCpu->CTX_SUFF(pVM);
2498 uint32_t val = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0; /* Bits set here must be set in the VMCS. */
2499 uint32_t zap = pVM->hm.s.vmx.msr.vmx_entry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2500
2501 /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supports the 1-setting of this bit. */
2502 val |= VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG;
2503
2504 /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
2505 if (CPUMIsGuestInLongModeEx(pMixedCtx))
2506 val |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;
2507 else
2508 Assert(!(val & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST));
2509
2510 /*
2511 * The following should not be set (since we're not in SMM mode):
2512 * - VMX_VMCS_CTRL_ENTRY_ENTRY_SMM
2513 * - VMX_VMCS_CTRL_ENTRY_DEACTIVATE_DUALMON
2514 */
2515
2516 /** @todo VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR,
2517 * VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR,
2518 * VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR */
2519
2520 if ((val & zap) != val)
2521 {
2522 LogRel(("hmR0VmxLoadGuestEntryCtls: invalid VM-entry controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
2523 pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0, val, zap));
2524 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2525 }
2526
2527 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, val);
2528 AssertRCReturn(rc, rc);
2529
2530         /* Update VCPU with the currently set VM-entry controls. */
2531 pVCpu->hm.s.vmx.u32EntryCtls = val;
2532 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_ENTRY_CTLS;
2533 }
2534 return rc;
2535}
2536
2537
2538/**
2539 * Sets up the VM-exit controls in the VMCS.
2540 *
2541 * @returns VBox status code.
2542 *
2543 * @param pVCpu Pointer to the VMCPU.
2544 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2545 * out-of-sync. Make sure to update the required fields
2546 * before using them.
2547 *
2548 * @remarks requires EFER.
2549 */
2550DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2551{
2552 int rc = VINF_SUCCESS;
2553 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_EXIT_CTLS)
2554 {
2555 PVM pVM = pVCpu->CTX_SUFF(pVM);
2556 uint32_t val = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0; /* Bits set here must be set in the VMCS. */
2557 uint32_t zap = pVM->hm.s.vmx.msr.vmx_exit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2558
2559 /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
2560 val |= VMX_VMCS_CTRL_EXIT_SAVE_DEBUG;
2561
2562 /*
2563 * Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary.
2564 * On VM-exit, VT-x sets both the host EFER.LMA and EFER.LME bit to this value. See assertion in hmR0VmxSaveHostMsrs().
2565 */
2566#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2567 if (HMVMX_IS_64BIT_HOST_MODE())
2568 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
2569 else
2570 Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
2571#elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
2572 if (CPUMIsGuestInLongModeEx(pMixedCtx))
2573 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE; /* The switcher goes to long mode. */
2574 else
2575 Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
2576#endif
2577
2578 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
2579 Assert(!(val & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT));
2580
2581 /** @todo VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR,
2582 * VMX_VMCS_CTRL_EXIT_SAVE_GUEST_PAT_MSR,
2583 * VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR,
2584 * VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR,
2585 * VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR. */
2586
2587 if (pVM->hm.s.vmx.msr.vmx_exit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER)
2588 val |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER;
2589
2590 if ((val & zap) != val)
2591 {
2592             LogRel(("hmR0VmxLoadGuestExitCtls: invalid VM-exit controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
2593 pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0, val, zap));
2594 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2595 }
2596
2597 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, val);
2598 AssertRCReturn(rc, rc);
2599
2600 /* Update VCPU with the currently set VM-exit controls. */
2601 pVCpu->hm.s.vmx.u32ExitCtls = val;
2602 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_EXIT_CTLS;
2603 }
2604 return rc;
2605}
2606
2607
2608/**
2609 * Loads the guest APIC and related state.
2610 *
2611 * @returns VBox status code.
2612 *
2613 * @param pVCpu Pointer to the VMCPU.
2614 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2615 * out-of-sync. Make sure to update the required fields
2616 * before using them.
2617 */
2618DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2619{
2620 int rc = VINF_SUCCESS;
2621 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_APIC_STATE)
2622 {
2623 /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
2624 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
2625 {
2626 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
2627
2628 bool fPendingIntr = false;
2629 uint8_t u8Tpr = 0;
2630 uint8_t u8PendingIntr = 0;
2631 rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
2632 AssertRCReturn(rc, rc);
2633
2634 /*
2635 * If there are external interrupts pending but masked by the TPR value, instruct VT-x to cause a VM-exit when
2636 * the guest lowers its TPR below the highest-priority pending interrupt and we can deliver the interrupt.
2637 * If there are no external interrupts pending, set threshold to 0 to not cause a VM-exit. We will eventually deliver
2638 * the interrupt when we VM-exit for other reasons.
2639 */
2640 pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8Tpr; /* Offset 0x80 is TPR in the APIC MMIO range. */
2641 uint32_t u32TprThreshold = 0;
2642 if (fPendingIntr)
2643 {
2644 /* Bits 3-0 of the TPR threshold field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
2645 const uint8_t u8PendingPriority = (u8PendingIntr >> 4);
2646 const uint8_t u8TprPriority = (u8Tpr >> 4) & 7;
2647 if (u8PendingPriority <= u8TprPriority)
2648 u32TprThreshold = u8PendingPriority;
2649 else
2650 u32TprThreshold = u8TprPriority; /* Required for Vista 64-bit guest, see @bugref{6398}. */
2651 }
2652 Assert(!(u32TprThreshold & 0xfffffff0)); /* Bits 31:4 MBZ. */
2653
2654 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
2655 AssertRCReturn(rc, rc);
2656 }
2657
2658 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_APIC_STATE;
2659 }
2660 return rc;
2661}
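/*
 * Illustrative example for the TPR-threshold computation in hmR0VmxLoadGuestApicState() above (values are
 * hypothetical): u8Tpr = 0x60 and u8PendingIntr = 0x41 yield u8TprPriority = 6 and u8PendingPriority = 4.
 * The pending interrupt is masked by the TPR, so the threshold is set to 4; as soon as the guest lowers its
 * TPR class below 4, the CPU raises a VM-exit and the now-unmasked interrupt can be delivered.
 */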
2662
2663
2664/**
2665 * Gets the guest's interruptibility-state ("interrupt shadow" as AMD calls it).
2666 *
2667 * @returns Guest's interruptibility-state.
2668 * @param pVCpu Pointer to the VMCPU.
2669 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2670 * out-of-sync. Make sure to update the required fields
2671 * before using them.
2672 *
2673 * @remarks No-long-jump zone!!!
2674 * @remarks Has side-effects with VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
2675 */
2676DECLINLINE(uint32_t) hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2677{
2678 /*
2679 * Instructions like STI and MOV SS inhibit interrupts till the next instruction completes. Check if we should
2680 * inhibit interrupts or clear any existing interrupt-inhibition.
2681 */
2682 uint32_t uIntrState = 0;
2683 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2684 {
2685 /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). */
2686 AssertMsg((pVCpu->hm.s.vmx.fUpdatedGuestState & (HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS))
2687 == (HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS), ("%#x\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
2688 if (pMixedCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
2689 {
2690 /*
2691 * We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in
2692 * VT-x, the flag's condition to be cleared is met and thus the cleared state is correct.
2693 */
2694 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2695 }
2696 else if (pMixedCtx->eflags.Bits.u1IF)
2697 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
2698 else
2699 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
2700 }
2701 return uIntrState;
2702}
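/*
 * Example of how the interruptibility-state above is derived (hypothetical guest state): the guest executes
 * STI, EM records the RIP of the following instruction as the inhibit-interrupts PC, and on the next
 * world-switch pMixedCtx->rip still matches that PC with EFLAGS.IF = 1, so BLOCK_STI is reported. Had the
 * inhibition come from a MOV SS/POP SS with interrupts disabled (EFLAGS.IF = 0), BLOCK_MOVSS would be
 * reported instead. If RIP no longer matches the recorded PC, the force-flag is cleared and 0 is returned.
 */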
2703
2704
2705/**
2706 * Loads the guest's interruptibility-state into the guest-state area in the
2707 * VMCS.
2708 *
2709 * @returns VBox status code.
2710 * @param pVCpu Pointer to the VMCPU.
2711 * @param uIntrState The interruptibility-state to set.
2712 */
2713static int hmR0VmxLoadGuestIntrState(PVMCPU pVCpu, uint32_t uIntrState)
2714{
2715 AssertMsg(!(uIntrState & 0xfffffff0), ("%#x\n", uIntrState)); /* Bits 31:4 MBZ. */
2716 Assert((uIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */
2717 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);
2718 AssertRCReturn(rc, rc);
2719 return rc;
2720}
2721
2722
2723/**
2724 * Loads the guest's RIP into the guest-state area in the VMCS.
2725 *
2726 * @returns VBox status code.
2727 * @param pVCpu Pointer to the VMCPU.
2728 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2729 * out-of-sync. Make sure to update the required fields
2730 * before using them.
2731 *
2732 * @remarks No-long-jump zone!!!
2733 */
2734static int hmR0VmxLoadGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2735{
2736 int rc = VINF_SUCCESS;
2737 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RIP)
2738 {
2739 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
2740 AssertRCReturn(rc, rc);
2741 Log4(("Load: VMX_VMCS_GUEST_RIP=%#RX64\n", pMixedCtx->rip));
2742 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RIP;
2743 }
2744 return rc;
2745}
2746
2747
2748/**
2749 * Loads the guest's RSP into the guest-state area in the VMCS.
2750 *
2751 * @returns VBox status code.
2752 * @param pVCpu Pointer to the VMCPU.
2753 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2754 * out-of-sync. Make sure to update the required fields
2755 * before using them.
2756 *
2757 * @remarks No-long-jump zone!!!
2758 */
2759static int hmR0VmxLoadGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2760{
2761 int rc = VINF_SUCCESS;
2762 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RSP)
2763 {
2764 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
2765 AssertRCReturn(rc, rc);
2766 Log4(("Load: VMX_VMCS_GUEST_RSP=%#RX64\n", pMixedCtx->rsp));
2767 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RSP;
2768 }
2769 return rc;
2770}
2771
2772
2773/**
2774 * Loads the guest's RFLAGS into the guest-state area in the VMCS.
2775 *
2776 * @returns VBox status code.
2777 * @param pVCpu Pointer to the VMCPU.
2778 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2779 * out-of-sync. Make sure to update the required fields
2780 * before using them.
2781 *
2782 * @remarks No-long-jump zone!!!
2783 */
2784static int hmR0VmxLoadGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2785{
2786 int rc = VINF_SUCCESS;
2787 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RFLAGS)
2788 {
2789 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
2790 Let us assert it as such and use 32-bit VMWRITE. */
2791 Assert(!(pMixedCtx->rflags.u64 >> 32));
2792 X86EFLAGS uEFlags = pMixedCtx->eflags;
2793 uEFlags.u32 &= VMX_EFLAGS_RESERVED_0; /* Bits 22-31, 15, 5 & 3 MBZ. */
2794 uEFlags.u32 |= VMX_EFLAGS_RESERVED_1; /* Bit 1 MB1. */
2795
2796 /*
2797 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so we can restore them on VM exit.
2798 * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under Virtual 8086 mode.
2799 */
2800 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
2801 {
2802 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2803 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2804 pVCpu->hm.s.vmx.RealMode.eflags.u32 = uEFlags.u32; /* Save the original eflags of the real-mode guest. */
2805 uEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
2806 uEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
2807 }
2808
2809 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, uEFlags.u32);
2810 AssertRCReturn(rc, rc);
2811
2812 Log4(("Load: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", uEFlags.u32));
2813 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RFLAGS;
2814 }
2815 return rc;
2816}
2817
2818
2819/**
2820 * Loads the guest RIP, RSP and RFLAGS into the guest-state area in the VMCS.
2821 *
2822 * @returns VBox status code.
2823 * @param pVCpu Pointer to the VMCPU.
2824 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2825 * out-of-sync. Make sure to update the required fields
2826 * before using them.
2827 *
2828 * @remarks No-long-jump zone!!!
2829 */
2830DECLINLINE(int) hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2831{
2832 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
2833 AssertRCReturn(rc, rc);
2834 rc = hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
2835 AssertRCReturn(rc, rc);
2836 rc = hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
2837 AssertRCReturn(rc, rc);
2838 return rc;
2839}
2840
2841
2842/**
2843 * Loads the guest control registers (CR0, CR3, CR4) into the guest-state area
2844 * in the VMCS.
2845 *
2846 * @returns VBox status code.
2847 *
2848 * @param pVCpu Pointer to the VMCPU.
2849 * @param pCtx      Pointer to the guest-CPU context. The data may be
2850 * out-of-sync. Make sure to update the required fields
2851 * before using them.
2852 *
2853 * @remarks No-long-jump zone!!!
2854 */
2855static int hmR0VmxLoadGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
2856{
2857 int rc = VINF_SUCCESS;
2858 PVM pVM = pVCpu->CTX_SUFF(pVM);
2859
2860 /*
2861 * Guest CR0.
2862 * Guest FPU.
2863 */
2864 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
2865 {
2866 Assert(!(pCtx->cr0 >> 32));
2867 uint32_t u32GuestCR0 = pCtx->cr0;
2868
2869 /* The guest's view (read access) of its CR0 is unblemished. */
2870 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32GuestCR0);
2871 AssertRCReturn(rc, rc);
2872 Log4(("Load: VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX32\n", u32GuestCR0));
2873
2874 /* Setup VT-x's view of the guest CR0. */
2875 /* Minimize VM-exits due to CR3 changes when we have NestedPaging. */
2876 if (pVM->hm.s.fNestedPaging)
2877 {
2878 if (CPUMIsGuestPagingEnabledEx(pCtx))
2879 {
2880 /* The guest has paging enabled, let it access CR3 without causing a VM exit if supported. */
2881 pVCpu->hm.s.vmx.u32ProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
2882 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT);
2883 }
2884 else
2885 {
2886 /* The guest doesn't have paging enabled, make CR3 accesses cause VM-exits so we can update our shadow page tables. */
2887 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
2888 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
2889 }
2890
2891 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2892 if (pVM->hm.s.vmx.fUnrestrictedGuest)
2893 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
2894
2895 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
2896 AssertRCReturn(rc, rc);
2897 }
2898 else
2899 u32GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2900
2901 /*
2902 * Guest FPU bits.
2903 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be set on the first
2904 * CPUs to support VT-x and no mention of with regards to UX in VM-entry checks.
2905 */
2906 u32GuestCR0 |= X86_CR0_NE;
2907 bool fInterceptNM = false;
2908 if (CPUMIsGuestFPUStateActive(pVCpu))
2909 {
2910 fInterceptNM = false; /* Guest FPU active, no need to VM-exit on #NM. */
2911 /* The guest should still get #NM exceptions when it expects them, so we should not clear the TS & MP bits here.
2912 We're only concerned about -us- not intercepting #NMs when the guest FPU state is active; not about the guest itself! */
2913 }
2914 else
2915 {
2916 fInterceptNM = true; /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
2917 u32GuestCR0 |= X86_CR0_TS /* Guest can task switch quickly and do lazy FPU syncing. */
2918 | X86_CR0_MP; /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
2919 }
2920
2921 /* Catch floating point exceptions if we need to report them to the guest in a different way. */
2922 bool fInterceptMF = false;
2923 if (!(pCtx->cr0 & X86_CR0_NE))
2924 fInterceptMF = true;
2925
2926 /* Finally, intercept all exceptions as we cannot directly inject them in real-mode, see hmR0VmxInjectEventVmcs(). */
2927 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
2928 {
2929 Assert(PDMVmmDevHeapIsEnabled(pVM));
2930 Assert(pVM->hm.s.vmx.pRealModeTSS);
2931 pVCpu->hm.s.vmx.u32XcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2932 fInterceptNM = true;
2933 fInterceptMF = true;
2934 }
2935 else
2936 pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2937
2938 if (fInterceptNM)
2939 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_NM);
2940 else
2941 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_NM);
2942
2943 if (fInterceptMF)
2944 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_MF);
2945 else
2946 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_MF);
2947
2948 /* Additional intercepts for debugging, define these yourself explicitly. */
2949#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2950 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_BP)
2951 | RT_BIT(X86_XCPT_DB)
2952 | RT_BIT(X86_XCPT_DE)
2953 | RT_BIT(X86_XCPT_NM)
2954 | RT_BIT(X86_XCPT_UD)
2955 | RT_BIT(X86_XCPT_NP)
2956 | RT_BIT(X86_XCPT_SS)
2957 | RT_BIT(X86_XCPT_GP)
2958 | RT_BIT(X86_XCPT_PF)
2959 | RT_BIT(X86_XCPT_MF);
2960#elif defined(HMVMX_ALWAYS_TRAP_PF)
2961 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
2962#endif
2963
2964 Assert(pVM->hm.s.fNestedPaging || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_PF)));
2965
2966 /* Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). */
2967 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 & pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
2968 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 | pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
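 /* How the fixed-CR0 MSRs are interpreted here: a bit set in vmx_cr0_fixed0 must be 1 in CR0, and a bit clear in
    vmx_cr0_fixed1 must be 0 in CR0. AND-ing the two therefore yields the bits we must force set (uSetCR0), while
    OR-ing them yields the bits allowed to be 1 (uZapCR0), used below to clear everything else. */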
2969 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
2970 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
2971 else
2972 Assert((uSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
2973
2974 u32GuestCR0 |= uSetCR0;
2975 u32GuestCR0 &= uZapCR0;
2976 u32GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */
2977
2978 /* Write VT-x's view of the guest CR0 into the VMCS and update the exception bitmap. */
2979 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCR0);
2980 AssertRCReturn(rc, rc);
2981 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
2982 AssertRCReturn(rc, rc);
2983 Log4(("Load: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", u32GuestCR0, uSetCR0, uZapCR0));
2984
2985 /*
2986 * CR0 is shared between host and guest along with a CR0 read shadow. Therefore, certain bits must not be changed
2987 * by the guest because VT-x ignores saving/restoring them (namely CD, ET, NW) and for certain other bits
2988 * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables).
2989 */
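 /* In other words: bits set in the CR0 guest/host mask are host-owned. Guest reads of those bits return the
    read-shadow value written above, and guest writes that differ from the shadow cause a CR-access VM-exit. */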
2990 uint32_t u32CR0Mask = 0;
2991 u32CR0Mask = X86_CR0_PE
2992 | X86_CR0_NE
2993 | X86_CR0_WP
2994 | X86_CR0_PG
2995 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
2996 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
2997 | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
2998 if (pVM->hm.s.vmx.fUnrestrictedGuest)
2999 u32CR0Mask &= ~X86_CR0_PE;
3000 if (pVM->hm.s.fNestedPaging)
3001 u32CR0Mask &= ~X86_CR0_WP;
3002
3003 /* If the guest FPU state is active, don't need to VM-exit on writes to FPU related bits in CR0. */
3004 if (fInterceptNM)
3005 u32CR0Mask |= (X86_CR0_TS | X86_CR0_MP);
3006 else
3007 u32CR0Mask &= ~(X86_CR0_TS | X86_CR0_MP);
3008
3009 /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */
3010 pVCpu->hm.s.vmx.u32CR0Mask = u32CR0Mask;
3011 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32CR0Mask);
3012 AssertRCReturn(rc, rc);
3013
3014 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
3015 }
3016
3017 /*
3018 * Guest CR2.
3019 * It's always loaded in the assembler code. Nothing to do here.
3020 */
3021
3022 /*
3023 * Guest CR3.
3024 */
3025 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3)
3026 {
3027 RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
3028 if (pVM->hm.s.fNestedPaging)
3029 {
3030 pVCpu->hm.s.vmx.HCPhysEPTP = PGMGetHyperCR3(pVCpu);
3031
3032 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
3033 Assert(pVCpu->hm.s.vmx.HCPhysEPTP);
3034 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & UINT64_C(0xfff0000000000000)));
3035 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & 0xfff));
3036
3037 /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
3038 pVCpu->hm.s.vmx.HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
3039 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
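 /* EPTP layout (see Intel spec. 24.6.11): bits 2:0 = EPT memory type (6 = write-back), bits 5:3 = EPT page-walk
    length minus 1, bits 11:6 must be zero in this setup, bits 51:12 = physical address of the EPT PML4 table. */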
3040
3041 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
3042 AssertMsg( ((pVCpu->hm.s.vmx.HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 5:3 (EPT page walk length - 1) must be 3. */
3043 && ((pVCpu->hm.s.vmx.HCPhysEPTP >> 6) & 0x3f) == 0, /* Bits 11:6 MBZ. */
3044 ("EPTP %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3045
3046 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
3047 AssertRCReturn(rc, rc);
3048 Log4(("Load: VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3049
3050 if ( pVM->hm.s.vmx.fUnrestrictedGuest
3051 || CPUMIsGuestPagingEnabledEx(pCtx))
3052 {
3053 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
3054 if (CPUMIsGuestInPAEModeEx(pCtx))
3055 {
3056 rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]); AssertRCReturn(rc, rc);
3057 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
3058 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
3059 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
3060 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
3061 }
3062
3063 /* The guest's view of its CR3 is unblemished with Nested Paging when the guest is using paging or we
3064 have Unrestricted Execution to handle the guest when it's not using paging. */
3065 GCPhysGuestCR3 = pCtx->cr3;
3066 }
3067 else
3068 {
3069 /*
3070 * The guest is not using paging, but the CPU (VT-x) has to. While the guest thinks it accesses physical memory
3071 * directly, we use our identity-mapped page table to map guest-linear to guest-physical addresses.
3072 * EPT takes care of translating it to host-physical addresses.
3073 */
3074 RTGCPHYS GCPhys;
3075 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
3076 Assert(PDMVmmDevHeapIsEnabled(pVM));
3077
3078 /* We obtain it here every time as the guest could have relocated this PCI region. */
3079 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
3080 AssertRCReturn(rc, rc);
3081
3082 GCPhysGuestCR3 = GCPhys;
3083 }
3084
3085 Log4(("Load: VMX_VMCS_GUEST_CR3=%#RGv (GstN)\n", GCPhysGuestCR3));
3086 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
3087 }
3088 else
3089 {
3090 /* Non-nested paging case, just use the hypervisor's CR3. */
3091 RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
3092
3093 Log4(("Load: VMX_VMCS_GUEST_CR3=%#RHv (HstN)\n", HCPhysGuestCR3));
3094 rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3);
3095 }
3096 AssertRCReturn(rc, rc);
3097
3098 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR3;
3099 }
3100
3101 /*
3102 * Guest CR4.
3103 */
3104 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4)
3105 {
3106 Assert(!(pCtx->cr4 >> 32));
3107 uint32_t u32GuestCR4 = pCtx->cr4;
3108
3109 /* The guest's view of its CR4 is unblemished. */
3110 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32GuestCR4);
3111 AssertRCReturn(rc, rc);
3112 Log4(("Load: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RX32\n", u32GuestCR4));
3113
3114 /* Setup VT-x's view of the guest CR4. */
3115 /*
3116 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software interrupts to the 8086 program
3117 * interrupt handler. Clear the VME bit (the interrupt redirection bitmap is already all 0, see hmR3InitFinalizeR0())
3118 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
3119 */
3120 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3121 {
3122 Assert(pVM->hm.s.vmx.pRealModeTSS);
3123 Assert(PDMVmmDevHeapIsEnabled(pVM));
3124 u32GuestCR4 &= ~X86_CR4_VME;
3125 }
3126
3127 if (pVM->hm.s.fNestedPaging)
3128 {
3129 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
3130 && !pVM->hm.s.vmx.fUnrestrictedGuest)
3131 {
3132 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
3133 u32GuestCR4 |= X86_CR4_PSE;
3134 /* Our identity-mapped page table is a 32-bit page directory. */
3135 u32GuestCR4 &= ~X86_CR4_PAE;
3136 }
3137 /* else use the guest's CR4. */
3138 }
3139 else
3140 {
3141 /*
3142 * Without nested paging, the shadow paging mode can differ from the guest paging mode; the shadow follows the
3143 * host paging mode, so we need to adjust VT-x's view of CR4 according to our shadow page tables.
3144 */
3145 switch (pVCpu->hm.s.enmShadowMode)
3146 {
3147 case PGMMODE_REAL: /* Real-mode. */
3148 case PGMMODE_PROTECTED: /* Protected mode without paging. */
3149 case PGMMODE_32_BIT: /* 32-bit paging. */
3150 {
3151 u32GuestCR4 &= ~X86_CR4_PAE;
3152 break;
3153 }
3154
3155 case PGMMODE_PAE: /* PAE paging. */
3156 case PGMMODE_PAE_NX: /* PAE paging with NX. */
3157 {
3158 u32GuestCR4 |= X86_CR4_PAE;
3159 break;
3160 }
3161
3162 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
3163 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
3164#ifdef VBOX_ENABLE_64_BITS_GUESTS
3165 break;
3166#endif
3167 default:
3168 AssertFailed();
3169 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
3170 }
3171 }
3172
3173 /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
3174 uint64_t uSetCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 & pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
3175 uint64_t uZapCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 | pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
3176 u32GuestCR4 |= uSetCR4;
3177 u32GuestCR4 &= uZapCR4;
3178
3179 /* Write VT-x's view of the guest CR4 into the VMCS. */
3180 Log4(("Load: VMX_VMCS_GUEST_CR4=%#RX32 (Set=%#RX32 Zap=%#RX32)\n", u32GuestCR4, uSetCR4, uZapCR4));
3181 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCR4);
3182 AssertRCReturn(rc, rc);
3183
3184 /* Set up the CR4 mask. These CR4 bits are owned by the host; if the guest attempts to change them, a VM-exit occurs. */
3185 uint32_t u32CR4Mask = 0;
3186 u32CR4Mask = X86_CR4_VME
3187 | X86_CR4_PAE
3188 | X86_CR4_PGE
3189 | X86_CR4_PSE
3190 | X86_CR4_VMXE;
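 /* Keeping X86_CR4_VMXE in the mask ensures the guest cannot flip a bit we rely on for VT-x operation; any guest
    attempt to modify these host-owned CR4 bits triggers a CR-access VM-exit instead. */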
3191 pVCpu->hm.s.vmx.u32CR4Mask = u32CR4Mask;
3192 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32CR4Mask);
3193 AssertRCReturn(rc, rc);
3194
3195 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR4;
3196 }
3197 return rc;
3198}
3199
3200
3201/**
3202 * Loads the guest debug registers into the guest-state area in the VMCS.
3203 * This also sets up whether #DB exceptions and MOV DRx accesses cause VM-exits.
3204 *
3205 * @returns VBox status code.
3206 * @param pVCpu Pointer to the VMCPU.
3207 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3208 * out-of-sync. Make sure to update the required fields
3209 * before using them.
3210 *
3211 * @remarks No-long-jump zone!!!
3212 */
3213static int hmR0VmxLoadGuestDebugRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3214{
3215 if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
3216 return VINF_SUCCESS;
3217
3218#ifdef VBOX_STRICT
3219 /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
3220 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
3221 {
3222 Assert(!(pMixedCtx->dr[7] >> 32)); /* upper 32 bits are reserved (MBZ). */
3223 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
3224 Assert((pMixedCtx->dr[7] & 0xd800) == 0); /* bits 15, 14, 12, 11 are reserved (MBZ). */
3225 Assert((pMixedCtx->dr[7] & 0x400) == 0x400); /* bit 10 is reserved (MB1). */
3226 }
3227#endif
3228
3229 int rc = VERR_INTERNAL_ERROR_5;
3230 PVM pVM = pVCpu->CTX_SUFF(pVM);
3231 bool fInterceptDB = false;
3232 bool fInterceptMovDRx = false;
3233 if (DBGFIsStepping(pVCpu))
3234 {
3235 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
3236 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)
3237 {
3238 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
3239 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
3240 AssertRCReturn(rc, rc);
3241 Assert(fInterceptDB == false);
3242 }
3243 else
3244 {
3245 fInterceptDB = true;
3246 pMixedCtx->eflags.u32 |= X86_EFL_TF;
3247 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RFLAGS;
3248 }
3249 }
3250
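 /* Hypervisor (DBGF) hardware breakpoints take priority over the guest's: if the hyper DR7 arms anything we load
    the hyper debug state and intercept MOV DRx so the guest can neither observe nor clobber our DRx values. */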
3251 if (CPUMGetHyperDR7(pVCpu) & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
3252 {
3253 if (!CPUMIsHyperDebugStateActive(pVCpu))
3254 {
3255 rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
3256 AssertRC(rc);
3257 }
3258 Assert(CPUMIsHyperDebugStateActive(pVCpu));
3259 fInterceptMovDRx = true;
3260 }
3261 else if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
3262 {
3263 if (!CPUMIsGuestDebugStateActive(pVCpu))
3264 {
3265 rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
3266 AssertRC(rc);
3267 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
3268 }
3269 Assert(CPUMIsGuestDebugStateActive(pVCpu));
3270 Assert(fInterceptMovDRx == false);
3271 }
3272 else if (!CPUMIsGuestDebugStateActive(pVCpu))
3273 {
3274 /* The guest debug state is not loaded and DR7 isn't armed; keep intercepting MOV DRx accesses so we can load the guest debug state lazily on first access. */
3275 fInterceptMovDRx = true;
3276 }
3277
3278 /* Update the exception bitmap regarding intercepting #DB generated by the guest. */
3279 if (fInterceptDB)
3280 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_DB);
3281 else if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3282 {
3283#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3284 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
3285#endif
3286 }
3287
3288 /* Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions. */
3289 if (fInterceptMovDRx)
3290 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
3291 else
3292 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
3293
3294 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
3295 AssertRCReturn(rc, rc);
3296 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
3297 AssertRCReturn(rc, rc);
3298
3299 /* The guest's view of its DR7 is unblemished. Use a 32-bit write as the upper 32 bits are MBZ, as asserted above. */
3300 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pMixedCtx->dr[7]);
3301 AssertRCReturn(rc, rc);
3302
3303 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
3304 return rc;
3305}
3306
3307
3308#ifdef VBOX_STRICT
3309/**
3310 * Strict function to validate segment registers.
3311 *
3312 * @remarks Requires CR0.
3313 */
3314static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3315{
3316 /* Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3317 /* NOTE: The reason we check for attribute value 0 and not just the unusable bit here is that hmR0VmxWriteSegmentReg()
3318 * only updates the VMCS' copy of the attributes with the unusable bit and doesn't change the guest-context value. */
3319 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
3320 && ( !CPUMIsGuestInRealModeEx(pCtx)
3321 && !CPUMIsGuestInV86ModeEx(pCtx)))
3322 {
3323 /* Protected mode checks */
3324 /* CS */
3325 Assert(pCtx->cs.Attr.n.u1Present);
3326 Assert(!(pCtx->cs.Attr.u & 0xf00));
3327 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
3328 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
3329 || !(pCtx->cs.Attr.n.u1Granularity));
3330 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
3331 || (pCtx->cs.Attr.n.u1Granularity));
3332 /* CS cannot be loaded with NULL in protected mode. */
3333 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & HMVMX_SEL_UNUSABLE)); /** @todo is this really true even for 64-bit CS?!? */
3334 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
3335 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
3336 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
3337 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
3338 else
3339 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
3340 /* SS */
3341 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
3342 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
3343 Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0));
3344 if ( !(pCtx->cr0 & X86_CR0_PE)
3345 || pCtx->cs.Attr.n.u4Type == 3)
3346 {
3347 Assert(!pCtx->ss.Attr.n.u2Dpl);
3348 }
3349 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & HMVMX_SEL_UNUSABLE))
3350 {
3351 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
3352 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
3353 Assert(pCtx->ss.Attr.n.u1Present);
3354 Assert(!(pCtx->ss.Attr.u & 0xf00));
3355 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
3356 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
3357 || !(pCtx->ss.Attr.n.u1Granularity));
3358 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
3359 || (pCtx->ss.Attr.n.u1Granularity));
3360 }
3361 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
3362 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & HMVMX_SEL_UNUSABLE))
3363 {
3364 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3365 Assert(pCtx->ds.Attr.n.u1Present);
3366 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
3367 Assert(!(pCtx->ds.Attr.u & 0xf00));
3368 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
3369 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
3370 || !(pCtx->ds.Attr.n.u1Granularity));
3371 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
3372 || (pCtx->ds.Attr.n.u1Granularity));
3373 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3374 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
3375 }
3376 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & HMVMX_SEL_UNUSABLE))
3377 {
3378 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3379 Assert(pCtx->es.Attr.n.u1Present);
3380 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
3381 Assert(!(pCtx->es.Attr.u & 0xf00));
3382 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
3383 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
3384 || !(pCtx->es.Attr.n.u1Granularity));
3385 Assert( !(pCtx->es.u32Limit & 0xfff00000)
3386 || (pCtx->es.Attr.n.u1Granularity));
3387 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3388 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
3389 }
3390 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & HMVMX_SEL_UNUSABLE))
3391 {
3392 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3393 Assert(pCtx->fs.Attr.n.u1Present);
3394 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
3395 Assert(!(pCtx->fs.Attr.u & 0xf00));
3396 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
3397 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
3398 || !(pCtx->fs.Attr.n.u1Granularity));
3399 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
3400 || (pCtx->fs.Attr.n.u1Granularity));
3401 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3402 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
3403 }
3404 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & HMVMX_SEL_UNUSABLE))
3405 {
3406 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3407 Assert(pCtx->gs.Attr.n.u1Present);
3408 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
3409 Assert(!(pCtx->gs.Attr.u & 0xf00));
3410 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
3411 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
3412 || !(pCtx->gs.Attr.n.u1Granularity));
3413 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
3414 || (pCtx->gs.Attr.n.u1Granularity));
3415 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3416 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
3417 }
3418 /* 64-bit capable CPUs. */
3419# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3420 Assert(!(pCtx->cs.u64Base >> 32));
3421 Assert(!pCtx->ss.Attr.u || !(pCtx->ss.u64Base >> 32));
3422 Assert(!pCtx->ds.Attr.u || !(pCtx->ds.u64Base >> 32));
3423 Assert(!pCtx->es.Attr.u || !(pCtx->es.u64Base >> 32));
3424# endif
3425 }
3426 else if ( CPUMIsGuestInV86ModeEx(pCtx)
3427 || ( CPUMIsGuestInRealModeEx(pCtx)
3428 && !pVM->hm.s.vmx.fUnrestrictedGuest))
3429 {
3430 /* Real and v86 mode checks. */
3431 /* hmR0VmxWriteSegmentReg() writes the modified attributes into the VMCS. We want to validate what we're actually feeding to VT-x. */
3432 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
3433 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3434 {
3435 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3; u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
3436 }
3437 else
3438 {
3439 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
3440 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
3441 }
3442
3443 /* CS */
3444 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
3445 Assert(pCtx->cs.u32Limit == 0xffff);
3446 Assert(u32CSAttr == 0xf3);
3447 /* SS */
3448 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
3449 Assert(pCtx->ss.u32Limit == 0xffff);
3450 Assert(u32SSAttr == 0xf3);
3451 /* DS */
3452 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
3453 Assert(pCtx->ds.u32Limit == 0xffff);
3454 Assert(u32DSAttr == 0xf3);
3455 /* ES */
3456 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
3457 Assert(pCtx->es.u32Limit == 0xffff);
3458 Assert(u32ESAttr == 0xf3);
3459 /* FS */
3460 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
3461 Assert(pCtx->fs.u32Limit == 0xffff);
3462 Assert(u32FSAttr == 0xf3);
3463 /* GS */
3464 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
3465 Assert(pCtx->gs.u32Limit == 0xffff);
3466 Assert(u32GSAttr == 0xf3);
3467 /* 64-bit capable CPUs. */
3468# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3469 Assert(!(pCtx->cs.u64Base >> 32));
3470 Assert(!u32SSAttr || !(pCtx->ss.u64Base >> 32));
3471 Assert(!u32DSAttr || !(pCtx->ds.u64Base >> 32));
3472 Assert(!u32ESAttr || !(pCtx->es.u64Base >> 32));
3473# endif
3474 }
3475}
3476#endif /* VBOX_STRICT */
3477
3478
3479/**
3480 * Writes a guest segment register into the guest-state area in the VMCS.
3481 *
3482 * @returns VBox status code.
3483 * @param pVCpu Pointer to the VMCPU.
3484 * @param idxSel Index of the selector in the VMCS.
3485 * @param idxLimit Index of the segment limit in the VMCS.
3486 * @param idxBase Index of the segment base in the VMCS.
3487 * @param idxAccess Index of the access rights of the segment in the VMCS.
3488 * @param pSelReg Pointer to the segment selector.
3489 * @param pCtx Pointer to the guest-CPU context.
3490 *
3491 * @remarks No-long-jump zone!!!
3492 */
3493static int hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
3494 uint32_t idxAccess, PCPUMSELREG pSelReg, PCPUMCTX pCtx)
3495{
3496 int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); /* 16-bit guest selector field. */
3497 AssertRCReturn(rc, rc);
3498 rc = VMXWriteVmcs32(idxLimit, pSelReg->u32Limit); /* 32-bit guest segment limit field. */
3499 AssertRCReturn(rc, rc);
3500 rc = VMXWriteVmcsGstN(idxBase, pSelReg->u64Base); /* Natural width guest segment base field.*/
3501 AssertRCReturn(rc, rc);
3502
3503 uint32_t u32Access = pSelReg->Attr.u;
3504 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3505 {
3506 /* VT-x requires our real-on-v86 mode hack to override the segment access-right bits. */
3507 u32Access = 0xf3;
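 /* 0xf3 decodes to: type=3 (read/write data, accessed), S=1 (code/data), DPL=3, P=1 - i.e. the attributes a
    segment register holds while executing in virtual-8086 mode. */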
3508 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
3509 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
3510 }
3511 else
3512 {
3513 /*
3514 * The way to differentiate between a selector that is really null and one that was merely loaded with 0 in
3515 * real-mode is by its segment attributes. A selector loaded in real-mode with the value 0 is valid and usable in
3516 * protected-mode, so we should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure that NULL
3517 * selectors loaded in protected-mode have their attributes set to 0.
3518 */
3519 if (!u32Access)
3520 u32Access = HMVMX_SEL_UNUSABLE;
3521 }
3522
3523 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
3524 AssertMsg((u32Access & HMVMX_SEL_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
3525 ("Access bit not set for usable segment. idx=%#x sel=%#x attr %#x\n", idxBase, pSelReg, pSelReg->Attr.u));
3526
3527 rc = VMXWriteVmcs32(idxAccess, u32Access); /* 32-bit guest segment access-rights field. */
3528 AssertRCReturn(rc, rc);
3529 return rc;
3530}
3531
3532
3533/**
3534 * Loads the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)
3535 * into the guest-state area in the VMCS.
3536 *
3537 * @returns VBox status code.
3539 * @param pVCpu Pointer to the VMCPU.
3540 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3541 * out-of-sync. Make sure to update the required fields
3542 * before using them.
3543 *
3544 * @remarks Requires CR0 (strict builds validation).
3545 * @remarks No-long-jump zone!!!
3546 */
3547static int hmR0VmxLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3548{
3549 int rc = VERR_INTERNAL_ERROR_5;
3550 PVM pVM = pVCpu->CTX_SUFF(pVM);
3551
3552 /*
3553 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
3554 */
3555 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)
3556 {
3557 /* Save the segment attributes for real-on-v86 mode hack, so we can restore them on VM-exit. */
3558 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3559 {
3560 pVCpu->hm.s.vmx.RealMode.uAttrCS.u = pMixedCtx->cs.Attr.u;
3561 pVCpu->hm.s.vmx.RealMode.uAttrSS.u = pMixedCtx->ss.Attr.u;
3562 pVCpu->hm.s.vmx.RealMode.uAttrDS.u = pMixedCtx->ds.Attr.u;
3563 pVCpu->hm.s.vmx.RealMode.uAttrES.u = pMixedCtx->es.Attr.u;
3564 pVCpu->hm.s.vmx.RealMode.uAttrFS.u = pMixedCtx->fs.Attr.u;
3565 pVCpu->hm.s.vmx.RealMode.uAttrGS.u = pMixedCtx->gs.Attr.u;
3566 }
3567
3568#ifdef VBOX_WITH_REM
3569 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
3570 {
3571 Assert(pVM->hm.s.vmx.pRealModeTSS);
3572 AssertCompile(PGMMODE_REAL < PGMMODE_PROTECTED);
3573 if ( pVCpu->hm.s.vmx.fWasInRealMode
3574 && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED)
3575 {
3576 /* Signal that the recompiler must flush its code-cache as the guest -may- rewrite code it will later execute
3577 in real-mode (e.g. OpenBSD 4.0) */
3578 REMFlushTBs(pVM);
3579 Log4(("Load: Switch to protected mode detected!\n"));
3580 pVCpu->hm.s.vmx.fWasInRealMode = false;
3581 }
3582 }
3583#endif
3584 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
3585 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs, pMixedCtx);
3586 AssertRCReturn(rc, rc);
3587 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
3588 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss, pMixedCtx);
3589 AssertRCReturn(rc, rc);
3590 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
3591 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds, pMixedCtx);
3592 AssertRCReturn(rc, rc);
3593 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
3594 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es, pMixedCtx);
3595 AssertRCReturn(rc, rc);
3596 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
3597 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs, pMixedCtx);
3598 AssertRCReturn(rc, rc);
3599 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
3600 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs, pMixedCtx);
3601 AssertRCReturn(rc, rc);
3602
3603 Log4(("Load: CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pMixedCtx->cs.Sel, pMixedCtx->cs.u64Base,
3604 pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u));
3605#ifdef VBOX_STRICT
3606 hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx);
3607#endif
3608 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SEGMENT_REGS;
3609 }
3610
3611 /*
3612 * Guest TR.
3613 */
3614 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)
3615 {
3616 /*
3617 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is achieved
3618 * using the interrupt redirection bitmap (all bits cleared to let the guest handle INT-n's) in the TSS.
3619 * See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
3620 */
3621 uint16_t u16Sel = 0;
3622 uint32_t u32Limit = 0;
3623 uint64_t u64Base = 0;
3624 uint32_t u32AccessRights = 0;
3625
3626 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3627 {
3628 u16Sel = pMixedCtx->tr.Sel;
3629 u32Limit = pMixedCtx->tr.u32Limit;
3630 u64Base = pMixedCtx->tr.u64Base;
3631 u32AccessRights = pMixedCtx->tr.Attr.u;
3632 }
3633 else
3634 {
3635 Assert(pVM->hm.s.vmx.pRealModeTSS);
3636 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMR3CanExecuteGuest() -XXX- what about inner loop changes? */
3637
3638 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
3639 RTGCPHYS GCPhys;
3640 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
3641 AssertRCReturn(rc, rc);
3642
3643 X86DESCATTR DescAttr;
3644 DescAttr.u = 0;
3645 DescAttr.n.u1Present = 1;
3646 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
3647
3648 u16Sel = 0;
3649 u32Limit = HM_VTX_TSS_SIZE;
3650 u64Base = GCPhys; /* in real-mode phys = virt. */
3651 u32AccessRights = DescAttr.u;
3652 }
3653
3654 /* Validate. */
3655 Assert(!(u16Sel & RT_BIT(2)));
3656 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
3657 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
3658 AssertMsg(!(u32AccessRights & HMVMX_SEL_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
3659 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
3660 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
3661 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
3662 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
3663 Assert( (u32Limit & 0xfff) == 0xfff
3664 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
3665 Assert( !(pMixedCtx->tr.u32Limit & 0xfff00000)
3666 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
3667
3668 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_TR, u16Sel); AssertRCReturn(rc, rc);
3669 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRCReturn(rc, rc);
3670 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRCReturn(rc, rc);
3671 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRCReturn(rc, rc);
3672
3673 Log4(("Load: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", u64Base));
3674 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_TR;
3675 }
3676
3677 /*
3678 * Guest GDTR.
3679 */
3680 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
3681 {
3682 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt); AssertRCReturn(rc, rc);
3683 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt); AssertRCReturn(rc, rc);
3684
3685 Assert(!(pMixedCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
3686 Log4(("Load: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pMixedCtx->gdtr.pGdt));
3687 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GDTR;
3688 }
3689
3690 /*
3691 * Guest LDTR.
3692 */
3693 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)
3694 {
3695 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
3696 uint32_t u32Access = 0;
3697 if (!pMixedCtx->ldtr.Attr.u)
3698 u32Access = HMVMX_SEL_UNUSABLE;
3699 else
3700 u32Access = pMixedCtx->ldtr.Attr.u;
3701
3702 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR, pMixedCtx->ldtr.Sel); AssertRCReturn(rc, rc);
3703 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pMixedCtx->ldtr.u32Limit); AssertRCReturn(rc, rc);
3704 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pMixedCtx->ldtr.u64Base); AssertRCReturn(rc, rc);
3705 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRCReturn(rc, rc);
3706
3707 /* Validate. */
3708 if (!(u32Access & HMVMX_SEL_UNUSABLE))
3709 {
3710 Assert(!(pMixedCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
3711 Assert(pMixedCtx->ldtr.Attr.n.u4Type == 2); /* Type must be 2 (LDT). */
3712 Assert(!pMixedCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
3713 Assert(pMixedCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
3714 Assert(!pMixedCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
3715 Assert(!(pMixedCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
3716 Assert( (pMixedCtx->ldtr.u32Limit & 0xfff) == 0xfff
3717 || !pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
3718 Assert( !(pMixedCtx->ldtr.u32Limit & 0xfff00000)
3719 || pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
3720 }
3721
3722 Log4(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pMixedCtx->ldtr.u64Base));
3723 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_LDTR;
3724 }
3725
3726 /*
3727 * Guest IDTR.
3728 */
3729 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
3730 {
3731 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt); AssertRCReturn(rc, rc);
3732 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt); AssertRCReturn(rc, rc);
3733
3734 Assert(!(pMixedCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
3735 Log4(("Load: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pMixedCtx->idtr.pIdt));
3736 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;
3737 }
3738
3739 return VINF_SUCCESS;
3740}
3741
3742
3743/**
3744 * Loads certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
3745 * areas. These MSRs are automatically loaded into the CPU on every successful
3746 * VM-entry and stored back from the CPU on every successful VM-exit.
3747 * Also loads the sysenter MSRs into the guest-state area in the VMCS.
3748 *
3749 * @returns VBox status code.
3750 * @param pVCpu Pointer to the VMCPU.
3751 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3752 * out-of-sync. Make sure to update the required fields
3753 * before using them.
3754 *
3755 * @remarks No-long-jump zone!!!
3756 */
3757static int hmR0VmxLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3758{
3759 AssertPtr(pVCpu);
3760 AssertPtr(pVCpu->hm.s.vmx.pvGuestMsr);
3761
3762 /*
3763 * MSRs covered by Auto-load/store: EFER, LSTAR, STAR, SF_MASK, TSC_AUX (RDTSCP).
3764 */
3765 int rc = VINF_SUCCESS;
3766 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
3767 {
3768#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
3769 PVM pVM = pVCpu->CTX_SUFF(pVM);
3770 PVMXMSR pGuestMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
3771 uint32_t cGuestMsrs = 0;
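 /* Each auto-load/store slot is a VMXMSR entry of the form { u32IndexMSR, u32Reserved, u64Value }. The same count
    is written to both the VM-entry MSR-load and VM-exit MSR-store counts below, so the guest values round-trip
    through the CPU automatically on every VM-entry/VM-exit. */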
3772
3773 /* See Intel spec. 4.1.4 "Enumeration of Paging Features by CPUID". */
3774 /** @todo r=ramshankar: Optimize this further to do lazy restoration and only
3775 * when the guest really is in 64-bit mode. */
3776 bool fSupportsLongMode = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
3777 if (fSupportsLongMode)
3778 {
3779 pGuestMsr->u32IndexMSR = MSR_K8_LSTAR;
3780 pGuestMsr->u32Reserved = 0;
3781 pGuestMsr->u64Value = pMixedCtx->msrLSTAR; /* 64 bits mode syscall rip */
3782 pGuestMsr++; cGuestMsrs++;
3783 pGuestMsr->u32IndexMSR = MSR_K6_STAR;
3784 pGuestMsr->u32Reserved = 0;
3785 pGuestMsr->u64Value = pMixedCtx->msrSTAR; /* legacy syscall eip, cs & ss */
3786 pGuestMsr++; cGuestMsrs++;
3787 pGuestMsr->u32IndexMSR = MSR_K8_SF_MASK;
3788 pGuestMsr->u32Reserved = 0;
3789 pGuestMsr->u64Value = pMixedCtx->msrSFMASK; /* syscall flag mask */
3790 pGuestMsr++; cGuestMsrs++;
3791 pGuestMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
3792 pGuestMsr->u32Reserved = 0;
3793 pGuestMsr->u64Value = pMixedCtx->msrKERNELGSBASE; /* swapgs exchange value */
3794 pGuestMsr++; cGuestMsrs++;
3795 }
3796
3797 /*
3798 * RDTSCP requires the TSC_AUX MSR. Host and guest share the physical MSR. So we have to
3799 * load the guest's copy if the guest can execute RDTSCP without causing VM-exits.
3800 */
3801 if ( CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP)
3802 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP))
3803 {
3804 pGuestMsr->u32IndexMSR = MSR_K8_TSC_AUX;
3805 pGuestMsr->u32Reserved = 0;
3806 rc = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pGuestMsr->u64Value);
3807 AssertRCReturn(rc, rc);
3808 pGuestMsr++; cGuestMsrs++;
3809 }
3810
3811 /* Shouldn't ever happen, but there -is- an architectural limit; we're well within the recommended 512. */
3812 if (cGuestMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc))
3813 {
3814 LogRel(("CPU autoload/store MSR count in VMCS exceeded cGuestMsrs=%u.\n", cGuestMsrs));
3815 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3816 }
3817
3818 /* Update the VCPU's copy of the guest MSR count. */
3819 pVCpu->hm.s.vmx.cGuestMsrs = cGuestMsrs;
3820 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cGuestMsrs); AssertRCReturn(rc, rc);
3821 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cGuestMsrs); AssertRCReturn(rc, rc);
3822#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
3823
3824 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_AUTO_MSRS;
3825 }
3826
3827 /*
3828 * Guest Sysenter MSRs.
3829 * These flags are only set when MSR-bitmaps are not supported by the CPU and we cause
3830 * VM-exits on WRMSRs for these MSRs.
3831 */
3832 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
3833 {
3834 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs); AssertRCReturn(rc, rc);
3835 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_CS_MSR;
3836 }
3837 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
3838 {
3839 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip); AssertRCReturn(rc, rc);
3840 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR;
3841 }
3842 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
3843 {
3844 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp); AssertRCReturn(rc, rc);
3845 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR;
3846 }
3847
3848 return rc;
3849}
3850
3851
3852/**
3853 * Loads the guest activity state into the guest-state area in the VMCS.
3854 *
3855 * @returns VBox status code.
3856 * @param pVCpu Pointer to the VMCPU.
3857 * @param pCtx Pointer to the guest-CPU context. The data may be
3858 * out-of-sync. Make sure to update the required fields
3859 * before using them.
3860 *
3861 * @remarks No-long-jump zone!!!
3862 */
3863static int hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pCtx)
3864{
3865 /** @todo See if we can make use of other states, e.g.
3866 * VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT. */
3867 int rc = VINF_SUCCESS;
3868 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_ACTIVITY_STATE)
3869 {
3870 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
3871 AssertRCReturn(rc, rc);
3872 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_ACTIVITY_STATE;
3873 }
3874 return rc;
3875}
3876
3877
3878/**
3879 * Sets up the appropriate function to run guest code.
3880 *
3881 * @returns VBox status code.
3882 * @param pVCpu Pointer to the VMCPU.
3883 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3884 * out-of-sync. Make sure to update the required fields
3885 * before using them.
3886 *
3887 * @remarks No-long-jump zone!!!
3888 */
3889static int hmR0VmxSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3890{
3891 if (CPUMIsGuestInLongModeEx(pMixedCtx))
3892 {
3893#ifndef VBOX_ENABLE_64_BITS_GUESTS
3894 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
3895#endif
3896 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
3897#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3898 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
3899 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
3900#else
3901 /* 64-bit host or hybrid host. */
3902 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
3903#endif
3904 }
3905 else
3906 {
3907 /* Guest is not in long mode, use the 32-bit handler. */
3908 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
3909 }
3910 Assert(pVCpu->hm.s.vmx.pfnStartVM);
3911 return VINF_SUCCESS;
3912}
3913
3914
3915/**
3916 * Wrapper for running the guest code in VT-x.
3917 *
3918 * @returns VBox strict status code.
3919 * @param pVM Pointer to the VM.
3920 * @param pVCpu Pointer to the VMCPU.
3921 * @param pCtx Pointer to the guest-CPU context.
3922 *
3923 * @remarks No-long-jump zone!!!
3924 */
3925DECLINLINE(int) hmR0VmxRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3926{
3927 /*
3928 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
3929 * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved, hence the need for this XMM wrapper.
3930 * Refer to the MSDN docs. "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details.
3931 */
3932#ifdef VBOX_WITH_KERNEL_USING_XMM
3933 return HMR0VMXStartVMWrapXMM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
3934#else
3935 return pVCpu->hm.s.vmx.pfnStartVM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
3936#endif
3937}
3938
3939
3940/**
3941 * Reports world-switch error and dumps some useful debug info.
3942 *
3943 * @param pVM Pointer to the VM.
3944 * @param pVCpu Pointer to the VMCPU.
3945 * @param rcVMRun The return code from VMLAUNCH/VMRESUME.
3946 * @param pCtx Pointer to the guest-CPU context.
3947 * @param pVmxTransient Pointer to the VMX transient structure (only
3948 * exitReason updated).
3949 */
3950static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
3951{
3952 Assert(pVM);
3953 Assert(pVCpu);
3954 Assert(pCtx);
3955 Assert(pVmxTransient);
3956 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
3957
3958 Log4(("VM-entry failure: %Rrc\n", rcVMRun));
3959 switch (rcVMRun)
3960 {
3961 case VERR_VMX_INVALID_VMXON_PTR:
3962 AssertFailed();
3963 break;
3964 case VINF_SUCCESS: /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
3965 case VERR_VMX_UNABLE_TO_START_VM: /* VMLAUNCH/VMRESUME itself failed. */
3966 {
3967 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.lasterror.u32ExitReason);
3968 rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.lasterror.u32InstrError);
3969 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
3970 AssertRC(rc);
3971
3972#ifdef VBOX_STRICT
3973 Log4(("uExitReason %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.lasterror.u32ExitReason,
3974 pVmxTransient->uExitReason));
3975 Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQualification));
3976 Log4(("InstrError %#RX32\n", pVCpu->hm.s.vmx.lasterror.u32InstrError));
3977 if (pVCpu->hm.s.vmx.lasterror.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
3978 Log4(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.lasterror.u32InstrError]));
3979 else
3980 Log4(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
3981
3982 /* VMX control bits. */
3983 uint32_t u32Val;
3984 uint64_t u64Val;
3985 HMVMXHCUINTREG uHCReg;
3986 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val); AssertRC(rc);
3987 Log4(("VMX_VMCS32_CTRL_PIN_EXEC %#RX32\n", u32Val));
3988 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val); AssertRC(rc);
3989 Log4(("VMX_VMCS32_CTRL_PROC_EXEC %#RX32\n", u32Val));
3990 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val); AssertRC(rc);
3991 Log4(("VMX_VMCS32_CTRL_PROC_EXEC2 %#RX32\n", u32Val));
3992 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val); AssertRC(rc);
3993 Log4(("VMX_VMCS32_CTRL_ENTRY %#RX32\n", u32Val));
3994 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val); AssertRC(rc);
3995 Log4(("VMX_VMCS32_CTRL_EXIT %#RX32\n", u32Val));
3996 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val); AssertRC(rc);
3997 Log4(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
3998 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32Val); AssertRC(rc);
3999 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
4000 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &u32Val); AssertRC(rc);
4001 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
4002 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &u32Val); AssertRC(rc);
4003 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
4004 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, &u32Val); AssertRC(rc);
4005 Log4(("VMX_VMCS32_CTRL_TPR_THRESHOLD %u\n", u32Val));
4006 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &u32Val); AssertRC(rc);
4007 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
4008 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
4009 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
4010 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
4011 Log4(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
4012 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val); AssertRC(rc);
4013 Log4(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
4014 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, &u32Val); AssertRC(rc);
4015 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
4016 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, &u32Val); AssertRC(rc);
4017 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
4018 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
4019 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
4020 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
4021 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
4022 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
4023 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
4024 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
4025 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
4026 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
4027 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
4028
4029 /* Guest bits. */
4030 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val); AssertRC(rc);
4031 Log4(("Old Guest Rip %#RX64 New %#RX64\n", pCtx->rip, u64Val));
4032 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val); AssertRC(rc);
4033 Log4(("Old Guest Rsp %#RX64 New %#RX64\n", pCtx->rsp, u64Val));
4034 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); AssertRC(rc);
4035 Log4(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
4036 rc = VMXReadVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, &u32Val); AssertRC(rc);
4037 Log4(("VMX_VMCS16_GUEST_FIELD_VPID %u\n", u32Val));
4038
4039 /* Host bits. */
4040 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR0, &uHCReg); AssertRC(rc);
4041 Log4(("Host CR0 %#RHr\n", uHCReg));
4042 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR3, &uHCReg); AssertRC(rc);
4043 Log4(("Host CR3 %#RHr\n", uHCReg));
4044 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR4, &uHCReg); AssertRC(rc);
4045 Log4(("Host CR4 %#RHr\n", uHCReg));
4046
4047 RTGDTR HostGdtr;
4048 PCX86DESCHC pDesc;
4049 ASMGetGDTR(&HostGdtr);
4050 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_CS, &u32Val); AssertRC(rc);
4051 Log4(("Host CS %#08x\n", u32Val));
4052 if (u32Val < HostGdtr.cbGdt)
4053 {
4054 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4055 HMR0DumpDescriptor(pDesc, u32Val, "CS: ");
4056 }
4057
4058 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_DS, &u32Val); AssertRC(rc);
4059 Log4(("Host DS %#08x\n", u32Val));
4060 if (u32Val < HostGdtr.cbGdt)
4061 {
4062 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4063 HMR0DumpDescriptor(pDesc, u32Val, "DS: ");
4064 }
4065
4066 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_ES, &u32Val); AssertRC(rc);
4067 Log4(("Host ES %#08x\n", u32Val));
4068 if (u32Val < HostGdtr.cbGdt)
4069 {
4070 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4071 HMR0DumpDescriptor(pDesc, u32Val, "ES: ");
4072 }
4073
4074 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_FS, &u32Val); AssertRC(rc);
4075 Log4(("Host FS %#08x\n", u32Val));
4076 if (u32Val < HostGdtr.cbGdt)
4077 {
4078 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4079 HMR0DumpDescriptor(pDesc, u32Val, "FS: ");
4080 }
4081
4082 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_GS, &u32Val); AssertRC(rc);
4083 Log4(("Host GS %#08x\n", u32Val));
4084 if (u32Val < HostGdtr.cbGdt)
4085 {
4086 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4087 HMR0DumpDescriptor(pDesc, u32Val, "GS: ");
4088 }
4089
4090 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_SS, &u32Val); AssertRC(rc);
4091 Log4(("Host SS %#08x\n", u32Val));
4092 if (u32Val < HostGdtr.cbGdt)
4093 {
4094 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4095 HMR0DumpDescriptor(pDesc, u32Val, "SS: ");
4096 }
4097
4098 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_TR, &u32Val); AssertRC(rc);
4099 Log4(("Host TR %#08x\n", u32Val));
4100 if (u32Val < HostGdtr.cbGdt)
4101 {
4102 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4103 HMR0DumpDescriptor(pDesc, u32Val, "TR: ");
4104 }
4105
4106 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_TR_BASE, &uHCReg); AssertRC(rc);
4107 Log4(("Host TR Base %#RHv\n", uHCReg));
4108 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, &uHCReg); AssertRC(rc);
4109 Log4(("Host GDTR Base %#RHv\n", uHCReg));
4110 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, &uHCReg); AssertRC(rc);
4111 Log4(("Host IDTR Base %#RHv\n", uHCReg));
4112 rc = VMXReadVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, &u32Val); AssertRC(rc);
4113 Log4(("Host SYSENTER CS %#08x\n", u32Val));
4114 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, &uHCReg); AssertRC(rc);
4115 Log4(("Host SYSENTER EIP %#RHv\n", uHCReg));
4116 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, &uHCReg); AssertRC(rc);
4117 Log4(("Host SYSENTER ESP %#RHv\n", uHCReg));
4118 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RSP, &uHCReg); AssertRC(rc);
4119 Log4(("Host RSP %#RHv\n", uHCReg));
4120 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg); AssertRC(rc);
4121 Log4(("Host RIP %#RHv\n", uHCReg));
4122# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4123 if (HMVMX_IS_64BIT_HOST_MODE())
4124 {
4125 Log4(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
4126 Log4(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
4127 Log4(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
4128 Log4(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
4129 Log4(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
4130 Log4(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
4131 }
4132# endif
4133#endif /* VBOX_STRICT */
4134 break;
4135 }
4136
4137 default:
4138 /* Impossible */
4139 AssertMsgFailed(("hmR0VmxReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
4140 break;
4141 }
4142 NOREF(pVM);
4143}
4144
4145
4146#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4147#ifndef VMX_USE_CACHED_VMCS_ACCESSES
4148# error "VMX_USE_CACHED_VMCS_ACCESSES not defined when it should be!"
4149#endif
4150#ifdef VBOX_STRICT
4151static bool hmR0VmxIsValidWriteField(uint32_t idxField)
4152{
4153 switch (idxField)
4154 {
4155 case VMX_VMCS_GUEST_RIP:
4156 case VMX_VMCS_GUEST_RSP:
4157 case VMX_VMCS_GUEST_SYSENTER_EIP:
4158 case VMX_VMCS_GUEST_SYSENTER_ESP:
4159 case VMX_VMCS_GUEST_GDTR_BASE:
4160 case VMX_VMCS_GUEST_IDTR_BASE:
4161 case VMX_VMCS_GUEST_CS_BASE:
4162 case VMX_VMCS_GUEST_DS_BASE:
4163 case VMX_VMCS_GUEST_ES_BASE:
4164 case VMX_VMCS_GUEST_FS_BASE:
4165 case VMX_VMCS_GUEST_GS_BASE:
4166 case VMX_VMCS_GUEST_SS_BASE:
4167 case VMX_VMCS_GUEST_LDTR_BASE:
4168 case VMX_VMCS_GUEST_TR_BASE:
4169 case VMX_VMCS_GUEST_CR3:
4170 return true;
4171 }
4172 return false;
4173}
4174
4175static bool hmR0VmxIsValidReadField(uint32_t idxField)
4176{
4177 switch (idxField)
4178 {
4179 /* Read-only fields. */
4180 case VMX_VMCS_RO_EXIT_QUALIFICATION:
4181 return true;
4182 }
4183 /* Remaining readable fields should also be writable. */
4184 return hmR0VmxIsValidWriteField(idxField);
4185}
4186#endif /* VBOX_STRICT */
4187
4188
4189/**
4190 * Executes the specified handler in 64-bit mode.
4191 *
4192 * @returns VBox status code.
4193 * @param pVM Pointer to the VM.
4194 * @param pVCpu Pointer to the VMCPU.
4195 * @param pCtx Pointer to the guest CPU context.
4196 * @param enmOp The operation to perform.
4197 * @param cbParam Number of parameters.
4198 * @param paParam Array of 32-bit parameters.
4199 */
4200VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
4201 uint32_t *paParam)
4202{
4203 int rc, rc2;
4204 PHMGLOBLCPUINFO pCpu;
4205 RTHCPHYS HCPhysCpuPage;
4206 RTCCUINTREG uOldEFlags;
4207
4208 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
4209 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
4210 Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
4211 Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
4212
4213#ifdef VBOX_STRICT
4214 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries; i++)
4215 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i]));
4216
4217 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries; i++)
4218 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i]));
4219#endif
4220
4221 /* Disable interrupts. */
4222 uOldEFlags = ASMIntDisableFlags();
4223
4224#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
4225 RTCPUID idHostCpu = RTMpCpuId();
4226 CPUMR0SetLApic(pVM, idHostCpu);
4227#endif
4228
4229 pCpu = HMR0GetCurrentCpu();
4230 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
4231
4232    /* Clear the VMCS. This marks it inactive, clears implementation-specific data and writes the VMCS data back to memory. */
4233 VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
4234
4235 /* Leave VMX Root Mode. */
4236 VMXDisable();
4237
4238 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
4239
4240 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
4241 CPUMSetHyperEIP(pVCpu, enmOp);
4242 for (int i = (int)cbParam - 1; i >= 0; i--)
4243 CPUMPushHyper(pVCpu, paParam[i]);
4244
4245 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
4246
4247 /* Call the switcher. */
4248 rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
4249 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
4250
4251 /** @todo replace with hmR0VmxEnterRootMode() and hmR0VmxLeaveRootMode(). */
4252 /* Make sure the VMX instructions don't cause #UD faults. */
4253 ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);
4254
4255 /* Re-enter VMX Root Mode */
4256 rc2 = VMXEnable(HCPhysCpuPage);
4257 if (RT_FAILURE(rc2))
4258 {
4259 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
4260 ASMSetFlags(uOldEFlags);
4261 return rc2;
4262 }
4263
4264 rc2 = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
4265 AssertRC(rc2);
4266 Assert(!(ASMGetFlags() & X86_EFL_IF));
4267 ASMSetFlags(uOldEFlags);
4268 return rc;
4269}
4270
4271
4272/**
4273 * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts
4274 * supporting 64-bit guests.
4275 *
4276 * @returns VBox status code.
4277 * @param fResume Whether to VMLAUNCH or VMRESUME.
4278 * @param pCtx Pointer to the guest-CPU context.
4279 * @param pCache Pointer to the VMCS cache.
4280 * @param pVM Pointer to the VM.
4281 * @param pVCpu Pointer to the VMCPU.
4282 */
4283DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
4284{
4285 uint32_t aParam[6];
4286 PHMGLOBLCPUINFO pCpu = NULL;
4287 RTHCPHYS HCPhysCpuPage = 0;
4288 int rc = VERR_INTERNAL_ERROR_5;
4289
4290 pCpu = HMR0GetCurrentCpu();
4291 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
4292
4293#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4294 pCache->uPos = 1;
4295 pCache->interPD = PGMGetInterPaeCR3(pVM);
4296 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
4297#endif
4298
4299#ifdef VBOX_STRICT
4300 pCache->TestIn.HCPhysCpuPage = 0;
4301 pCache->TestIn.HCPhysVmcs = 0;
4302 pCache->TestIn.pCache = 0;
4303 pCache->TestOut.HCPhysVmcs = 0;
4304 pCache->TestOut.pCache = 0;
4305 pCache->TestOut.pCtx = 0;
4306 pCache->TestOut.eflags = 0;
4307#endif
4308
4309 aParam[0] = (uint32_t)(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */
4310 aParam[1] = (uint32_t)(HCPhysCpuPage >> 32); /* Param 1: VMXON physical address - Hi. */
4311 aParam[2] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */
4312 aParam[3] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs >> 32); /* Param 2: VMCS physical address - Hi. */
4313 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache);
4314 aParam[5] = 0;
4315
4316#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4317 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
4318 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
4319#endif
4320 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, 6, &aParam[0]);
4321
4322#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4323 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
4324 Assert(pCtx->dr[4] == 10);
4325 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
4326#endif
4327
4328#ifdef VBOX_STRICT
4329 AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
4330 AssertMsg(pCache->TestIn.HCPhysVmcs == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
4331 pVCpu->hm.s.vmx.HCPhysVmcs));
4332 AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
4333 pCache->TestOut.HCPhysVmcs));
4334 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
4335 pCache->TestOut.pCache));
4336 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache),
4337 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache)));
4338 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
4339 pCache->TestOut.pCtx));
4340 Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
4341#endif
4342 return rc;
4343}
4344
4345
4346/**
4347 * Initializes the VMCS read-cache. The VMCS cache is used for 32-bit hosts
4348 * running 64-bit guests (except 32-bit Darwin which runs with 64-bit paging in
4349 * 32-bit mode) for 64-bit fields that cannot be accessed in 32-bit mode. Some
4350 * 64-bit fields -can- be accessed (those that have a 32-bit FULL & HIGH part).
4351 *
4352 * @returns VBox status code.
4353 * @param pVM Pointer to the VM.
4354 * @param pVCpu Pointer to the VMCPU.
4355 */
4356static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu)
4357{
4358#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField) \
4359{ \
4360 Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
4361 pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
4362 pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
4363 ++cReadFields; \
4364}
4365
4366 AssertPtr(pVM);
4367 AssertPtr(pVCpu);
4368 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
4369 uint32_t cReadFields = 0;
4370
4371 /*
4372 * Don't remove the #if 0'd fields in this code. They're listed here for consistency
4373 * and serve to indicate exceptions to the rules.
4374 */
4375
4376 /* Guest-natural selector base fields. */
4377#if 0
4378 /* These are 32-bit in practice. See Intel spec. 2.5 "Control Registers". */
4379 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR0);
4380 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR4);
4381#endif
4382 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_ES_BASE);
4383 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CS_BASE);
4384 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SS_BASE);
4385 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DS_BASE);
4386 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_FS_BASE);
4387 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GS_BASE);
4388 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_LDTR_BASE);
4389 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_TR_BASE);
4390 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GDTR_BASE);
4391 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_IDTR_BASE);
4392 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RSP);
4393 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RIP);
4394#if 0
4395 /* Unused natural width guest-state fields. */
4396 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS);
4397 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in Nested Paging case */
4398#endif
4399 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_ESP);
4400 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP);
4401
4402 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for these 64-bit fields (using "FULL" and "HIGH" fields). */
4403#if 0
4404 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL);
4405 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_DEBUGCTL_FULL);
4406 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PAT_FULL);
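/*
 * Illustrative expansion (hypothetical helper, kept in #if 0): with the uncached variant of
 * the macro above, VMXLOCAL_READ_SEG(CS, cs) amounts to the call below.
 */
#if 0
static int hmR0VmxExampleReadGuestCs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
    /* Identical to VMXLOCAL_READ_SEG(CS, cs) with the uncached field encodings. */
    return hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT,
                                 VMX_VMCS_GUEST_CS_BASE, VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs);
}
#endif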
4407 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_EFER_FULL);
4408 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL);
4409 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE0_FULL);
4410 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE1_FULL);
4411 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE2_FULL);
4412 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE3_FULL);
4413#endif
4414
4415 /* Natural width guest-state fields. */
4416 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_QUALIFICATION);
4417#if 0
4418 /* Currently unused field. */
4419 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR);
4420#endif
4421
4422 if (pVM->hm.s.fNestedPaging)
4423 {
4424 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3);
4425 AssertMsg(cReadFields == VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields,
4426 VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX));
4427 pCache->Read.cValidEntries = VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX;
4428 }
4429 else
4430 {
4431 AssertMsg(cReadFields == VMX_VMCS_MAX_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields, VMX_VMCS_MAX_CACHE_IDX));
4432 pCache->Read.cValidEntries = VMX_VMCS_MAX_CACHE_IDX;
4433 }
4434
4435#undef VMXLOCAL_INIT_READ_CACHE_FIELD
4436 return VINF_SUCCESS;
4437}
4438
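/*
 * Illustrative sketch (hypothetical helper, kept in #if 0): once the 64-bit switcher has
 * filled the read-cache initialized above, a cached field is fetched via its *_CACHE_IDX
 * slot instead of a VMREAD; the index macro name follows the idxField##_CACHE_IDX pattern
 * used by VMXLOCAL_INIT_READ_CACHE_FIELD().
 */
#if 0
static uint64_t hmR0VmxExampleCachedGuestRip(PVMCPU pVCpu)
{
    PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
    Assert(pCache->Read.aField[VMX_VMCS_GUEST_RIP_CACHE_IDX] == VMX_VMCS_GUEST_RIP);
    return pCache->Read.aFieldVal[VMX_VMCS_GUEST_RIP_CACHE_IDX];
}
#endif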
4439
4440/**
4441 * Writes a field into the VMCS. This can either directly invoke a VMWRITE or
4442 * queue up the VMWRITE by using the VMCS write cache (on 32-bit hosts, except
4443 * Darwin, running 64-bit guests).
4444 *
4445 * @returns VBox status code.
4446 * @param pVCpu Pointer to the VMCPU.
4447 * @param idxField The VMCS field encoding.
4448 * @param u64Val        The 16, 32 or 64-bit value to write.
4449 */
4450VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
4451{
4452 int rc;
4453 switch (idxField)
4454 {
4455 /*
4456         * These fields consist of a "FULL" and a "HIGH" part which can be written to individually.
4457 */
4458 /* 64-bit Control fields. */
4459 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
4460 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
4461 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
4462 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
4463 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
4464 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
4465 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
4466 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
4467 case VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL:
4468 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
4469 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
4470 case VMX_VMCS64_CTRL_EPTP_FULL:
4471 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
4472 /* 64-bit Guest-state fields. */
4473 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
4474 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
4475 case VMX_VMCS64_GUEST_PAT_FULL:
4476 case VMX_VMCS64_GUEST_EFER_FULL:
4477 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
4478 case VMX_VMCS64_GUEST_PDPTE0_FULL:
4479 case VMX_VMCS64_GUEST_PDPTE1_FULL:
4480 case VMX_VMCS64_GUEST_PDPTE2_FULL:
4481 case VMX_VMCS64_GUEST_PDPTE3_FULL:
4482 /* 64-bit Host-state fields. */
4483 case VMX_VMCS64_HOST_FIELD_PAT_FULL:
4484 case VMX_VMCS64_HOST_FIELD_EFER_FULL:
4485 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
4486 {
4487 rc = VMXWriteVmcs32(idxField, u64Val);
4488 rc |= VMXWriteVmcs32(idxField + 1, (uint32_t)(u64Val >> 32));
4489 break;
4490 }
4491
4492 /*
4493 * These fields do not have high and low parts. Queue up the VMWRITE by using the VMCS write-cache (for 64-bit
4494 * values). When we switch the host to 64-bit mode for running 64-bit guests, these VMWRITEs get executed then.
4495 */
4496 /* Natural-width Guest-state fields. */
4497 case VMX_VMCS_GUEST_CR3:
4498 case VMX_VMCS_GUEST_ES_BASE:
4499 case VMX_VMCS_GUEST_CS_BASE:
4500 case VMX_VMCS_GUEST_SS_BASE:
4501 case VMX_VMCS_GUEST_DS_BASE:
4502 case VMX_VMCS_GUEST_FS_BASE:
4503 case VMX_VMCS_GUEST_GS_BASE:
4504 case VMX_VMCS_GUEST_LDTR_BASE:
4505 case VMX_VMCS_GUEST_TR_BASE:
4506 case VMX_VMCS_GUEST_GDTR_BASE:
4507 case VMX_VMCS_GUEST_IDTR_BASE:
4508 case VMX_VMCS_GUEST_RSP:
4509 case VMX_VMCS_GUEST_RIP:
4510 case VMX_VMCS_GUEST_SYSENTER_ESP:
4511 case VMX_VMCS_GUEST_SYSENTER_EIP:
4512 {
4513 if (!(u64Val >> 32))
4514 {
4515 /* If this field is 64-bit, VT-x will zero out the top bits. */
4516 rc = VMXWriteVmcs32(idxField, (uint32_t)u64Val);
4517 }
4518 else
4519 {
4520 /* Assert that only the 32->64 switcher case should ever come here. */
4521 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);
4522 rc = VMXWriteCachedVmcsEx(pVCpu, idxField, u64Val);
4523 }
4524 break;
4525 }
4526
4527 default:
4528 {
4529 AssertMsgFailed(("VMXWriteVmcs64Ex: Invalid field %#RX32 (pVCpu=%p u64Val=%#RX64)\n", idxField, pVCpu, u64Val));
4530 rc = VERR_INVALID_PARAMETER;
4531 break;
4532 }
4533 }
4534 AssertRCReturn(rc, rc);
4535 return rc;
4536}
4537
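/*
 * Worked example (hypothetical EPTP value, kept in #if 0): a 64-bit field with FULL/HIGH
 * halves is split by VMXWriteVmcs64Ex() into two 32-bit VMWRITEs; the low dword goes to
 * the FULL encoding and the high dword to the companion encoding at idxField + 1.
 */
#if 0
static void hmR0VmxExampleWriteEptp(PVMCPU pVCpu)
{
    uint64_t const u64Eptp = UINT64_C(0x0000001234567000);  /* Hypothetical EPTP. */
    /* Equivalent to VMXWriteVmcs32(VMX_VMCS64_CTRL_EPTP_FULL, 0x34567000) followed by
       VMXWriteVmcs32(VMX_VMCS64_CTRL_EPTP_FULL + 1, 0x00000012). */
    int rc = VMXWriteVmcs64Ex(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, u64Eptp);
    AssertRC(rc);
}
#endif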
4538
4539/**
4540 * Queues up a VMWRITE by using the VMCS write cache. This is only used on 32-bit
4541 * hosts (except Darwin) for 64-bit guests.
4542 *
4543 * @param pVCpu         Pointer to the VMCPU.
4544 * @param idxField      The VMCS field encoding.
4545 * @param u64Val        The 16, 32 or 64-bit value to write.
4546 */
4547VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
4548{
4549 AssertPtr(pVCpu);
4550 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
4551
4552 AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
4553 ("entries=%u\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
4554
4555 /* Make sure there are no duplicates. */
4556 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
4557 {
4558 if (pCache->Write.aField[i] == idxField)
4559 {
4560 pCache->Write.aFieldVal[i] = u64Val;
4561 return VINF_SUCCESS;
4562 }
4563 }
4564
4565 pCache->Write.aField[pCache->Write.cValidEntries] = idxField;
4566 pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
4567 pCache->Write.cValidEntries++;
4568 return VINF_SUCCESS;
4569}
4570
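/*
 * Illustrative sketch (hypothetical helper, kept in #if 0): queueing the same natural-width
 * field twice through VMXWriteCachedVmcsEx() only overwrites the cached value, so the 64-bit
 * switcher later replays a single VMWRITE for that field.
 */
#if 0
static void hmR0VmxExampleCachedWrite(PVMCPU pVCpu)
{
    uint32_t const cBefore = pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries;
    VMXWriteCachedVmcsEx(pVCpu, VMX_VMCS_GUEST_RIP, UINT64_C(0xffffffff81000000)); /* Queued. */
    VMXWriteCachedVmcsEx(pVCpu, VMX_VMCS_GUEST_RIP, UINT64_C(0xffffffff81000004)); /* Overwrites the queued value. */
    Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= cBefore + 1);
}
#endif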
4571/* Enable later when the assembly code uses these as callbacks. */
4572#if 0
4573/*
4574 * Loads the VMCS write-cache into the CPU (by executing VMWRITEs).
4575 *
4576 * @param pVCpu Pointer to the VMCPU.
4577 * @param pCache Pointer to the VMCS cache.
4578 *
4579 * @remarks No-long-jump zone!!!
4580 */
4581VMMR0DECL(void) VMXWriteCachedVmcsLoad(PVMCPU pVCpu, PVMCSCACHE pCache)
4582{
4583 AssertPtr(pCache);
4584 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
4585 {
4586 int rc = VMXWriteVmcs64(pCache->Write.aField[i], pCache->Write.aFieldVal[i]);
4587 AssertRC(rc);
4588 }
4589 pCache->Write.cValidEntries = 0;
4590}
4591
4592
4593/**
4594 * Stores the VMCS read-cache from the CPU (by executing VMREADs).
4595 *
4596 * @param pVCpu Pointer to the VMCPU.
4597 * @param pCache Pointer to the VMCS cache.
4598 *
4599 * @remarks No-long-jump zone!!!
4600 */
4601VMMR0DECL(void) VMXReadCachedVmcsStore(PVMCPU pVCpu, PVMCSCACHE pCache)
4602{
4603 AssertPtr(pCache);
4604 for (uint32_t i = 0; i < pCache->Read.cValidEntries; i++)
4605 {
4606 int rc = VMXReadVmcs64(pCache->Read.aField[i], &pCache->Read.aFieldVal[i]);
4607 AssertRC(rc);
4608 }
4609}
4610#endif
4611#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
4612
4613
4614/**
4615 * Sets up the usage of TSC-offsetting and updates the VMCS. If offsetting is
4616 * not possible, causes VM-exits on RDTSC(P)s. Also sets up the VMX preemption
4617 * timer.
4618 *
4619 * @returns VBox status code.
4620 * @param pVCpu Pointer to the VMCPU.
4621 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4622 * out-of-sync. Make sure to update the required fields
4623 * before using them.
4624 * @remarks No-long-jump zone!!!
4625 */
4626static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4627{
4628 int rc = VERR_INTERNAL_ERROR_5;
4629 bool fOffsettedTsc = false;
4630 PVM pVM = pVCpu->CTX_SUFF(pVM);
4631 if (pVM->hm.s.vmx.fUsePreemptTimer)
4632 {
4633 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &pVCpu->hm.s.vmx.u64TSCOffset);
4634
4635 /* Make sure the returned values have sane upper and lower boundaries. */
4636 uint64_t u64CpuHz = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
4637 cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64th of a second */
4638 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
4639 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
4640
4641 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
4642 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount); AssertRC(rc);
4643 }
4644 else
4645 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset);
4646
4647 if (fOffsettedTsc)
4648 {
4649 uint64_t u64CurTSC = ASMReadTSC();
4650 if (u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
4651 {
4652 /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
4653 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset); AssertRC(rc);
4654
4655 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
4656 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4657 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
4658 }
4659 else
4660 {
4661 /* VM-exit on RDTSC(P) as we would otherwise pass decreasing TSC values to the guest. */
4662 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
4663 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4664 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow);
4665 }
4666 }
4667 else
4668 {
4669 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
4670 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
4671 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4672 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
4673 }
4674}
4675
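/*
 * Worked example (hypothetical numbers, kept in #if 0): on a 2.048 GHz CPU with a
 * preemption-timer shift of 5, the deadline computed above is clamped to
 * [u64CpuHz/2048, u64CpuHz/64] = [1 000 000, 32 000 000] TSC ticks and then shifted right
 * by 5, yielding a VMX-preemption timer value between 31 250 and 1 000 000 ticks.
 */
#if 0
static uint32_t hmR0VmxExamplePreemptTimerTicks(uint64_t cTicksToDeadline)
{
    uint64_t const u64CpuHz           = UINT64_C(2048000000);       /* Hypothetical CPU frequency. */
    uint32_t const cPreemptTimerShift = 5;                          /* Hypothetical VMX timer shift. */
    cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64);     /* 1/64th of a second. */
    cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048);   /* 1/2048th of a second. */
    cTicksToDeadline >>= cPreemptTimerShift;
    return (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
}
#endif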
4676
4677/**
4678 * Determines if an exception is a contributory exception. Contributory
4679 * exceptions are ones which can cause double-faults. Page-fault is
4680 * intentionally not included here as it's a conditional contributory exception.
4681 *
4682 * @returns true if the exception is contributory, false otherwise.
4683 * @param uVector The exception vector.
4684 */
4685DECLINLINE(bool) hmR0VmxIsContributoryXcpt(const uint32_t uVector)
4686{
4687 switch (uVector)
4688 {
4689 case X86_XCPT_GP:
4690 case X86_XCPT_SS:
4691 case X86_XCPT_NP:
4692 case X86_XCPT_TS:
4693 case X86_XCPT_DE:
4694 return true;
4695 default:
4696 break;
4697 }
4698 return false;
4699}
4700
4701
4702/**
4703 * Sets an event as a pending event to be injected into the guest.
4704 *
4705 * @param pVCpu Pointer to the VMCPU.
4706 * @param u32IntrInfo The VM-entry interruption-information field.
4707 * @param cbInstr The VM-entry instruction length in bytes (for software
4708 * interrupts, exceptions and privileged software
4709 * exceptions).
4710 * @param u32ErrCode The VM-entry exception error code.
4711 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
4712 * page-fault.
4713 */
4714DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntrInfo, uint32_t cbInstr, uint32_t u32ErrCode,
4715 RTGCUINTPTR GCPtrFaultAddress)
4716{
4717 Assert(!pVCpu->hm.s.Event.fPending);
4718 pVCpu->hm.s.Event.fPending = true;
4719 pVCpu->hm.s.Event.u64IntrInfo = u32IntrInfo;
4720 pVCpu->hm.s.Event.u32ErrCode = u32ErrCode;
4721 pVCpu->hm.s.Event.cbInstr = cbInstr;
4722 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
4723}
4724
4725
4726/**
4727 * Sets a double-fault (#DF) exception as pending-for-injection into the VM.
4728 *
4729 * @param pVCpu Pointer to the VMCPU.
4730 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4731 * out-of-sync. Make sure to update the required fields
4732 * before using them.
4733 */
4734DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4735{
4736 uint32_t u32IntrInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
4737 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
4738 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
4739 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
4740}
4741
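/*
 * Illustrative sketch (hypothetical helper, kept in #if 0): a #GP with an error code would
 * be queued the same way as the #DF above, using the interruption-information layout
 * documented for hmR0VmxSetPendingEvent().
 */
#if 0
static void hmR0VmxExampleSetPendingXcptGP(PVMCPU pVCpu, uint32_t u32ErrCode)
{
    uint32_t u32IntrInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
    u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
    u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
    hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
}
#endif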
4742
4743/**
4744 * Handles a condition that occurred while delivering an event through the guest
4745 * IDT.
4746 *
4747 * @returns VBox status code (informational error codes included).
4748 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
4749 * @retval VINF_HM_DOUBLE_FAULT if a #DF condition was detected and we ought to
4750 * continue execution of the guest which will deliver the #DF.
4751 * @retval VINF_EM_RESET if we detected a triple-fault condition.
4752 *
4753 * @param pVCpu Pointer to the VMCPU.
4754 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4755 * out-of-sync. Make sure to update the required fields
4756 * before using them.
4757 * @param pVmxTransient Pointer to the VMX transient structure.
4758 *
4759 * @remarks No-long-jump zone!!!
4760 */
4761static int hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
4762{
4763 int rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
4764 AssertRC(rc);
4765 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
4766 {
4767 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
4768 AssertRCReturn(rc, rc);
4769
4770 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
4771 uint32_t uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntrInfo);
4772 uint32_t uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
4773
4774 typedef enum
4775 {
4776 VMXREFLECTXCPT_XCPT, /* Reflect the exception to the guest or for further evaluation by VMM. */
4777 VMXREFLECTXCPT_DF, /* Reflect the exception as a double-fault to the guest. */
4778 VMXREFLECTXCPT_TF, /* Indicate a triple faulted state to the VMM. */
4779 VMXREFLECTXCPT_NONE /* Nothing to reflect. */
4780 } VMXREFLECTXCPT;
4781
4782 /* See Intel spec. 30.7.1.1 "Reflecting Exceptions to Guest Software". */
4783 VMXREFLECTXCPT enmReflect = VMXREFLECTXCPT_NONE;
4784 if (VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntrInfo))
4785 {
4786 if (uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT)
4787 {
4788 enmReflect = VMXREFLECTXCPT_XCPT;
4789#ifdef VBOX_STRICT
4790 if ( hmR0VmxIsContributoryXcpt(uIdtVector)
4791 && uExitVector == X86_XCPT_PF)
4792 {
4793 Log4(("IDT: vcpu[%RU32] Contributory #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
4794 }
4795#endif
4796 if ( uExitVector == X86_XCPT_PF
4797 && uIdtVector == X86_XCPT_PF)
4798 {
4799 pVmxTransient->fVectoringPF = true;
4800 Log4(("IDT: vcpu[%RU32] Vectoring #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
4801 }
4802 else if ( (pVCpu->hm.s.vmx.u32XcptBitmap & HMVMX_CONTRIBUTORY_XCPT_MASK)
4803 && hmR0VmxIsContributoryXcpt(uExitVector)
4804 && ( hmR0VmxIsContributoryXcpt(uIdtVector)
4805 || uIdtVector == X86_XCPT_PF))
4806 {
4807 enmReflect = VMXREFLECTXCPT_DF;
4808 }
4809 else if (uIdtVector == X86_XCPT_DF)
4810 enmReflect = VMXREFLECTXCPT_TF;
4811 }
4812 else if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
4813 || uIntType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT
4814 || uIntType == VMX_IDT_VECTORING_INFO_TYPE_NMI)
4815 {
4816 /*
4817             * Ignore software interrupts (INT n), software exceptions (#BP, #OF) and privileged software exceptions
4818             * (whatever those are) as they reoccur when restarting the instruction.
4819 */
4820 enmReflect = VMXREFLECTXCPT_XCPT;
4821 }
4822 }
4823 else
4824 {
4825 /*
4826 * If event delivery caused an EPT violation/misconfig or APIC access VM-exit, then the VM-exit
4827 * interruption-information will not be valid and we end up here. In such cases, it is sufficient to reflect the
4828 * original exception to the guest after handling the VM-exit.
4829 */
4830 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
4831 || uIntType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT
4832 || uIntType == VMX_IDT_VECTORING_INFO_TYPE_NMI)
4833 {
4834 enmReflect = VMXREFLECTXCPT_XCPT;
4835 }
4836 }
4837
4838 switch (enmReflect)
4839 {
4840 case VMXREFLECTXCPT_XCPT:
4841 {
4842 Assert( uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4843 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
4844 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT);
4845
4846 uint32_t u32ErrCode = 0;
4847 if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntrInfo))
4848 {
4849 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
4850 AssertRCReturn(rc, rc);
4851 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
4852 }
4853
4854 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF. See hmR0VmxExitXcptPF(). */
4855 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INTR_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
4856 0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2);
4857 rc = VINF_SUCCESS;
4858 Log4(("IDT: vcpu[%RU32] Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->idCpu,
4859 pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.u32ErrCode));
4860 break;
4861 }
4862
4863 case VMXREFLECTXCPT_DF:
4864 {
4865 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
4866 rc = VINF_HM_DOUBLE_FAULT;
4867 Log4(("IDT: vcpu[%RU32] Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->idCpu,
4868 pVCpu->hm.s.Event.u64IntrInfo, uIdtVector, uExitVector));
4869 break;
4870 }
4871
4872 case VMXREFLECTXCPT_TF:
4873 {
4874 rc = VINF_EM_RESET;
4875 Log4(("IDT: vcpu[%RU32] Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", pVCpu->idCpu, uIdtVector,
4876 uExitVector));
4877 break;
4878 }
4879
4880 default:
4881 Assert(rc == VINF_SUCCESS);
4882 break;
4883 }
4884 }
4885 Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET);
4886 return rc;
4887}
4888
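/*
 * Simplified sketch (hypothetical helper, kept in #if 0) of the reflection rules implemented
 * above for the case where a hardware exception was being delivered and the VM-exit
 * interruption-information is valid. It omits the exception-bitmap check and the
 * vectoring-#PF bookkeeping; 0 = reflect the original event, 1 = raise #DF, 2 = triple fault.
 */
#if 0
static int hmR0VmxExampleClassifyReflection(uint32_t uIdtVector, uint32_t uExitVector)
{
    if (   uIdtVector  == X86_XCPT_PF
        && uExitVector == X86_XCPT_PF)
        return 0;   /* #PF during #PF delivery: reflect, handled as a vectoring #PF. */
    if (   hmR0VmxIsContributoryXcpt(uExitVector)
        && (   hmR0VmxIsContributoryXcpt(uIdtVector)
            || uIdtVector == X86_XCPT_PF))
        return 1;   /* E.g. #GP raised while delivering #NP: convert to #DF. */
    if (uIdtVector == X86_XCPT_DF)
        return 2;   /* Fault while delivering #DF: triple fault. */
    return 0;       /* Otherwise reflect the original event. */
}
#endif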
4889
4890/**
4891 * Saves the guest's CR0 register from the VMCS into the guest-CPU context.
4892 *
4893 * @returns VBox status code.
4894 * @param pVCpu Pointer to the VMCPU.
4895 * @param pMixedCtx   Pointer to the guest-CPU context. The data may be
4896 * out-of-sync. Make sure to update the required fields
4897 * before using them.
4898 *
4899 * @remarks No-long-jump zone!!!
4900 */
4901static int hmR0VmxSaveGuestCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4902{
4903 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0))
4904 {
4905 uint32_t uVal = 0;
4906 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uVal);
4907 AssertRCReturn(rc, rc);
4908 uint32_t uShadow = 0;
4909 rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow);
4910 AssertRCReturn(rc, rc);
4911
4912 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR0Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR0Mask);
4913 CPUMSetGuestCR0(pVCpu, uVal);
4914 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR0;
4915 }
4916 return VINF_SUCCESS;
4917}
4918
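/*
 * Worked example (hypothetical values, kept in #if 0): the combine above is
 * guest-visible CR0 = (read shadow & mask) | (real CR0 & ~mask). With a mask covering
 * X86_CR0_CD, a real CR0 of 0x80000033 (CD clear) and a read shadow with CD set, the guest
 * sees CR0.CD=1 while the CPU actually runs with caching enabled.
 */
#if 0
static uint32_t hmR0VmxExampleCombineCr0(uint32_t uRealCr0, uint32_t uShadowCr0, uint32_t u32CR0Mask)
{
    return (uShadowCr0 & u32CR0Mask) | (uRealCr0 & ~u32CR0Mask);
}
#endif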
4919
4920/**
4921 * Saves the guest's CR4 register from the VMCS into the guest-CPU context.
4922 *
4923 * @returns VBox status code.
4924 * @param pVCpu Pointer to the VMCPU.
4925 * @param pMixedCtx   Pointer to the guest-CPU context. The data may be
4926 * out-of-sync. Make sure to update the required fields
4927 * before using them.
4928 *
4929 * @remarks No-long-jump zone!!!
4930 */
4931static int hmR0VmxSaveGuestCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4932{
4933 int rc = VINF_SUCCESS;
4934 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR4))
4935 {
4936 uint32_t uVal = 0;
4937 uint32_t uShadow = 0;
4938 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uVal);
4939 AssertRCReturn(rc, rc);
4940 rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow);
4941 AssertRCReturn(rc, rc);
4942
4943 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR4Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR4Mask);
4944 CPUMSetGuestCR4(pVCpu, uVal);
4945 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR4;
4946 }
4947 return rc;
4948}
4949
4950
4951/**
4952 * Saves the guest's RIP register from the VMCS into the guest-CPU context.
4953 *
4954 * @returns VBox status code.
4955 * @param pVCpu Pointer to the VMCPU.
4956 * @param pMixedCtx   Pointer to the guest-CPU context. The data may be
4957 * out-of-sync. Make sure to update the required fields
4958 * before using them.
4959 *
4960 * @remarks No-long-jump zone!!!
4961 */
4962static int hmR0VmxSaveGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4963{
4964 int rc = VINF_SUCCESS;
4965 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP))
4966 {
4967 uint64_t u64Val = 0;
4968 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);
4969 AssertRCReturn(rc, rc);
4970
4971 pMixedCtx->rip = u64Val;
4972 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RIP;
4973 }
4974 return rc;
4975}
4976
4977
4978/**
4979 * Saves the guest's RSP register from the VMCS into the guest-CPU context.
4980 *
4981 * @returns VBox status code.
4982 * @param pVCpu Pointer to the VMCPU.
4983 * @param pMixedCtx   Pointer to the guest-CPU context. The data may be
4984 * out-of-sync. Make sure to update the required fields
4985 * before using them.
4986 *
4987 * @remarks No-long-jump zone!!!
4988 */
4989static int hmR0VmxSaveGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4990{
4991 int rc = VINF_SUCCESS;
4992 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RSP))
4993 {
4994 uint64_t u64Val = 0;
4995 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);
4996 AssertRCReturn(rc, rc);
4997
4998 pMixedCtx->rsp = u64Val;
4999 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RSP;
5000 }
5001 return rc;
5002}
5003
5004
5005/**
5006 * Saves the guest's RFLAGS from the VMCS into the guest-CPU context.
5007 *
5008 * @returns VBox status code.
5009 * @param pVCpu Pointer to the VMCPU.
5010 * @param pMixedCtx   Pointer to the guest-CPU context. The data may be
5011 * out-of-sync. Make sure to update the required fields
5012 * before using them.
5013 *
5014 * @remarks No-long-jump zone!!!
5015 */
5016static int hmR0VmxSaveGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5017{
5018 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS))
5019 {
5020 uint32_t uVal = 0;
5021 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &uVal);
5022 AssertRCReturn(rc, rc);
5023
5024 pMixedCtx->eflags.u32 = uVal;
5025 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) /* Undo our real-on-v86-mode changes to eflags if necessary. */
5026 {
5027 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
5028 Log4(("Saving real-mode EFLAGS VT-x view=%#RX32\n", pMixedCtx->eflags.u32));
5029
5030 pMixedCtx->eflags.Bits.u1VM = 0;
5031 pMixedCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.eflags.Bits.u2IOPL;
5032 }
5033
5034 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RFLAGS;
5035 }
5036 return VINF_SUCCESS;
5037}
5038
5039
5040/**
5041 * Wrapper for saving the guest's RIP, RSP and RFLAGS from the VMCS into the
5042 * guest-CPU context.
5043 */
5044DECLINLINE(int) hmR0VmxSaveGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5045{
5046 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
5047 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
5048 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
5049 return rc;
5050}
5051
5052
5053/**
5054 * Saves the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
5055 * from the guest-state area in the VMCS.
5056 *
5057 * @param pVCpu Pointer to the VMCPU.
5058 * @param pMixedCtx   Pointer to the guest-CPU context. The data may be
5059 * out-of-sync. Make sure to update the required fields
5060 * before using them.
5061 *
5062 * @remarks No-long-jump zone!!!
5063 */
5064static void hmR0VmxSaveGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5065{
5066 uint32_t uIntrState = 0;
5067 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
5068 AssertRC(rc);
5069
5070 if (!uIntrState)
5071 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
5072 else
5073 {
5074 Assert( uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
5075 || uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
5076 rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
5077 AssertRC(rc);
5078 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* for hmR0VmxGetGuestIntrState(). */
5079 AssertRC(rc);
5080
5081 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
5082 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
5083 }
5084}
5085
5086
5087/**
5088 * Saves the guest's activity state.
5089 *
5090 * @returns VBox status code.
5091 * @param pVCpu Pointer to the VMCPU.
5092 * @param pMixedCtx   Pointer to the guest-CPU context. The data may be
5093 * out-of-sync. Make sure to update the required fields
5094 * before using them.
5095 *
5096 * @remarks No-long-jump zone!!!
5097 */
5098static int hmR0VmxSaveGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5099{
5100 /* Nothing to do for now until we make use of different guest-CPU activity state. Just update the flag. */
5101 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_ACTIVITY_STATE;
5102 return VINF_SUCCESS;
5103}
5104
5105
5106/**
5107 * Saves the guest SYSENTER MSRs (SYSENTER_CS, SYSENTER_EIP, SYSENTER_ESP) from
5108 * the current VMCS into the guest-CPU context.
5109 *
5110 * @returns VBox status code.
5111 * @param pVCpu Pointer to the VMCPU.
5112 * @param pMixedCtx   Pointer to the guest-CPU context. The data may be
5113 * out-of-sync. Make sure to update the required fields
5114 * before using them.
5115 *
5116 * @remarks No-long-jump zone!!!
5117 */
5118static int hmR0VmxSaveGuestSysenterMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5119{
5120 int rc = VINF_SUCCESS;
5121 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR))
5122 {
5123 uint32_t u32Val = 0;
5124 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRCReturn(rc, rc);
5125 pMixedCtx->SysEnter.cs = u32Val;
5126 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR;
5127 }
5128
5129 uint64_t u64Val = 0;
5130 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR))
5131 {
5132 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &u64Val); AssertRCReturn(rc, rc);
5133 pMixedCtx->SysEnter.eip = u64Val;
5134 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR;
5135 }
5136 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR))
5137 {
5138 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &u64Val); AssertRCReturn(rc, rc);
5139 pMixedCtx->SysEnter.esp = u64Val;
5140 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR;
5141 }
5142 return rc;
5143}
5144
5145
5146/**
5147 * Saves the guest FS_BASE MSR from the current VMCS into the guest-CPU
5148 * context.
5149 *
5150 * @returns VBox status code.
5151 * @param pVCpu Pointer to the VMCPU.
5152 * @param pMixedCtx   Pointer to the guest-CPU context. The data may be
5153 * out-of-sync. Make sure to update the required fields
5154 * before using them.
5155 *
5156 * @remarks No-long-jump zone!!!
5157 */
5158static int hmR0VmxSaveGuestFSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5159{
5160 int rc = VINF_SUCCESS;
5161 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_FS_BASE_MSR))
5162 {
5163 uint64_t u64Val = 0;
5164 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_FS_BASE, &u64Val); AssertRCReturn(rc, rc);
5165 pMixedCtx->fs.u64Base = u64Val;
5166 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_FS_BASE_MSR;
5167 }
5168 return rc;
5169}
5170
5171
5172/**
5173 * Saves the guest GS_BASE MSR from the current VMCS into the guest-CPU
5174 * context.
5175 *
5176 * @returns VBox status code.
5177 * @param pVCpu Pointer to the VMCPU.
5178 * @param pMixedCtx   Pointer to the guest-CPU context. The data may be
5179 * out-of-sync. Make sure to update the required fields
5180 * before using them.
5181 *
5182 * @remarks No-long-jump zone!!!
5183 */
5184static int hmR0VmxSaveGuestGSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5185{
5186 int rc = VINF_SUCCESS;
5187 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_GS_BASE_MSR))
5188 {
5189 uint64_t u64Val = 0;
5190 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GS_BASE, &u64Val); AssertRCReturn(rc, rc);
5191 pMixedCtx->gs.u64Base = u64Val;
5192 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_GS_BASE_MSR;
5193 }
5194 return rc;
5195}
5196
5197
5198/**
5199 * Saves the auto load/store'd guest MSRs from the current VMCS into the
5200 * guest-CPU context. Currently these are LSTAR, STAR, SFMASK, KERNEL-GS BASE
5201 * and TSC_AUX.
5202 *
5203 * @returns VBox status code.
5204 * @param pVCpu Pointer to the VMCPU.
5205 * @param pMixedCtx   Pointer to the guest-CPU context. The data may be
5206 * out-of-sync. Make sure to update the required fields
5207 * before using them.
5208 *
5209 * @remarks No-long-jump zone!!!
5210 */
5211static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5212{
5213 if (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS)
5214 return VINF_SUCCESS;
5215
5216#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
5217 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cGuestMsrs; i++)
5218 {
5219 PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
5220 pMsr += i;
5221 switch (pMsr->u32IndexMSR)
5222 {
5223 case MSR_K8_LSTAR: pMixedCtx->msrLSTAR = pMsr->u64Value; break;
5224 case MSR_K6_STAR: pMixedCtx->msrSTAR = pMsr->u64Value; break;
5225 case MSR_K8_SF_MASK: pMixedCtx->msrSFMASK = pMsr->u64Value; break;
5226 case MSR_K8_TSC_AUX: CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value); break;
5227 case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value; break;
5228 case MSR_K6_EFER: /* EFER can't be changed without causing a VM-exit. */ break;
5229 default:
5230 {
5231 AssertFailed();
5232 return VERR_HM_UNEXPECTED_LD_ST_MSR;
5233 }
5234 }
5235 }
5236#endif
5237
5238 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS;
5239 return VINF_SUCCESS;
5240}
5241
5242
5243/**
5244 * Saves the guest control registers from the current VMCS into the guest-CPU
5245 * context.
5246 *
5247 * @returns VBox status code.
5248 * @param pVCpu Pointer to the VMCPU.
5249 * @param pMixedCtx   Pointer to the guest-CPU context. The data may be
5250 * out-of-sync. Make sure to update the required fields
5251 * before using them.
5252 *
5253 * @remarks No-long-jump zone!!!
5254 */
5255static int hmR0VmxSaveGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5256{
5257 /* Guest CR0. Guest FPU. */
5258 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
5259 AssertRCReturn(rc, rc);
5260
5261 /* Guest CR4. */
5262 rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
5263 AssertRCReturn(rc, rc);
5264
5265    /* Guest CR2 - always updated during the world-switch or in #PF. */
5266 /* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */
5267 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR3))
5268 {
5269 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0);
5270 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR4);
5271
5272 PVM pVM = pVCpu->CTX_SUFF(pVM);
5273 if ( pVM->hm.s.vmx.fUnrestrictedGuest
5274 || ( pVM->hm.s.fNestedPaging
5275 && CPUMIsGuestPagingEnabledEx(pMixedCtx)))
5276 {
5277 uint64_t u64Val = 0;
5278 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);
5279 if (pMixedCtx->cr3 != u64Val)
5280 {
5281 CPUMSetGuestCR3(pVCpu, u64Val);
5282 if (VMMRZCallRing3IsEnabled(pVCpu))
5283 {
5284 PGMUpdateCR3(pVCpu, u64Val);
5285 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
5286 }
5287 else
5288 {
5289                    /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3(). */
5290 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
5291 }
5292 }
5293
5294            /* If the guest is in PAE mode, sync back the PDPEs into the guest state. */
5295 if (CPUMIsGuestInPAEModeEx(pMixedCtx)) /* Reads CR0, CR4 and EFER MSR (EFER is always up-to-date). */
5296 {
5297 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
5298 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
5299 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
5300 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
5301
5302 if (VMMRZCallRing3IsEnabled(pVCpu))
5303 {
5304 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
5305 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
5306 }
5307 else
5308 {
5309 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */
5310 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
5311 }
5312 }
5313 }
5314
5315 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR3;
5316 }
5317
5318 /*
5319 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback()
5320 * -> VMMRZCallRing3Disable() -> hmR0VmxSaveGuestState() -> Set VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
5321 * -> continue with VM-exit handling -> hmR0VmxSaveGuestControlRegs() and here we are.
5322 *
5323 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
5324 */
5325 if (VMMRZCallRing3IsEnabled(pVCpu))
5326 {
5327 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
5328 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
5329
5330 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
5331 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
5332
5333 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
5334 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
5335 }
5336
5337 return rc;
5338}
5339
5340
5341/**
5342 * Reads a guest segment register from the current VMCS into the guest-CPU
5343 * context.
5344 *
5345 * @returns VBox status code.
5346 * @param pVCpu Pointer to the VMCPU.
5347 * @param idxSel Index of the selector in the VMCS.
5348 * @param idxLimit Index of the segment limit in the VMCS.
5349 * @param idxBase Index of the segment base in the VMCS.
5350 * @param idxAccess Index of the access rights of the segment in the VMCS.
5351 * @param pSelReg Pointer to the segment selector.
5352 *
5353 * @remarks No-long-jump zone!!!
5354 * @remarks Never call this function directly!!! Use the VMXLOCAL_READ_SEG()
5355 * macro as that takes care of whether to read from the VMCS cache or
5356 * not.
5357 */
5358DECLINLINE(int) hmR0VmxReadSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
5359 PCPUMSELREG pSelReg)
5360{
5361 uint32_t u32Val = 0;
5362 int rc = VMXReadVmcs32(idxSel, &u32Val);
5363 AssertRCReturn(rc, rc);
5364 pSelReg->Sel = (uint16_t)u32Val;
5365 pSelReg->ValidSel = (uint16_t)u32Val;
5366 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
5367
5368 rc = VMXReadVmcs32(idxLimit, &u32Val);
5369 AssertRCReturn(rc, rc);
5370 pSelReg->u32Limit = u32Val;
5371
5372 uint64_t u64Val = 0;
5373 rc = VMXReadVmcsGstNByIdxVal(idxBase, &u64Val);
5374 AssertRCReturn(rc, rc);
5375 pSelReg->u64Base = u64Val;
5376
5377 rc = VMXReadVmcs32(idxAccess, &u32Val);
5378 AssertRCReturn(rc, rc);
5379 pSelReg->Attr.u = u32Val;
5380
5381 /*
5382 * If VT-x marks the segment as unusable, the rest of the attributes are undefined with certain exceptions (some bits in
5383 * CS, SS). Regardless, we have to clear the bits here and only retain the unusable bit because the unusable bit is specific
5384     * to VT-x; everyone else relies on the attribute being zero and has no clue what the unusable bit is.
5385 *
5386 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
5387 */
5388 if (pSelReg->Attr.u & HMVMX_SEL_UNUSABLE)
5389 {
5390 Assert(idxSel != VMX_VMCS16_GUEST_FIELD_TR); /* TR is the only selector that can never be unusable. */
5391 pSelReg->Attr.u = HMVMX_SEL_UNUSABLE;
5392 }
5393 return VINF_SUCCESS;
5394}
5395
5396
5397#ifdef VMX_USE_CACHED_VMCS_ACCESSES
5398#define VMXLOCAL_READ_SEG(Sel, CtxSel) \
5399 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
5400 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
5401#else
5402#define VMXLOCAL_READ_SEG(Sel, CtxSel) \
5403 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
5404 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
5405#endif
5406
5407
5408/**
5409 * Saves the guest segment registers from the current VMCS into the guest-CPU
5410 * context.
5411 *
5412 * @returns VBox status code.
5413 * @param pVCpu Pointer to the VMCPU.
5414 * @param pMixedCtx   Pointer to the guest-CPU context. The data may be
5415 * out-of-sync. Make sure to update the required fields
5416 * before using them.
5417 *
5418 * @remarks No-long-jump zone!!!
5419 */
5420static int hmR0VmxSaveGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5421{
5422 /* Guest segment registers. */
5423 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SEGMENT_REGS))
5424 {
5425 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); AssertRCReturn(rc, rc);
5426 rc = VMXLOCAL_READ_SEG(CS, cs); AssertRCReturn(rc, rc);
5427 rc = VMXLOCAL_READ_SEG(SS, ss); AssertRCReturn(rc, rc);
5428 rc = VMXLOCAL_READ_SEG(DS, ds); AssertRCReturn(rc, rc);
5429 rc = VMXLOCAL_READ_SEG(ES, es); AssertRCReturn(rc, rc);
5430 rc = VMXLOCAL_READ_SEG(FS, fs); AssertRCReturn(rc, rc);
5431 rc = VMXLOCAL_READ_SEG(GS, gs); AssertRCReturn(rc, rc);
5432
5433 /* Restore segment attributes for real-on-v86 mode hack. */
5434 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
5435 {
5436 pMixedCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrCS.u;
5437 pMixedCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrSS.u;
5438 pMixedCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrDS.u;
5439 pMixedCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrES.u;
5440 pMixedCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrFS.u;
5441 pMixedCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrGS.u;
5442 }
5443 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SEGMENT_REGS;
5444 }
5445
5446 return VINF_SUCCESS;
5447}
5448
5449
5450/**
5451 * Saves the guest descriptor table registers and task register from the current
5452 * VMCS into the guest-CPU context.
5453 *
5454 * @returns VBox status code.
5455 * @param pVCpu Pointer to the VMCPU.
5456 * @param pMixedCtx   Pointer to the guest-CPU context. The data may be
5457 * out-of-sync. Make sure to update the required fields
5458 * before using them.
5459 *
5460 * @remarks No-long-jump zone!!!
5461 */
5462static int hmR0VmxSaveGuestTableRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5463{
5464 int rc = VINF_SUCCESS;
5465
5466 /* Guest LDTR. */
5467 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_LDTR))
5468 {
5469 rc = VMXLOCAL_READ_SEG(LDTR, ldtr);
5470 AssertRCReturn(rc, rc);
5471 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_LDTR;
5472 }
5473
5474 /* Guest GDTR. */
5475 uint64_t u64Val = 0;
5476 uint32_t u32Val = 0;
5477 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_GDTR))
5478 {
5479 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val); AssertRCReturn(rc, rc);
5480 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
5481 pMixedCtx->gdtr.pGdt = u64Val;
5482 pMixedCtx->gdtr.cbGdt = u32Val;
5483 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_GDTR;
5484 }
5485
5486 /* Guest IDTR. */
5487 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_IDTR))
5488 {
5489 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val); AssertRCReturn(rc, rc);
5490 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
5491 pMixedCtx->idtr.pIdt = u64Val;
5492 pMixedCtx->idtr.cbIdt = u32Val;
5493 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_IDTR;
5494 }
5495
5496 /* Guest TR. */
5497 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_TR))
5498 {
5499 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
5500 AssertRCReturn(rc, rc);
5501
5502        /* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR; don't save the fake one. */
5503 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
5504 {
5505 rc = VMXLOCAL_READ_SEG(TR, tr);
5506 AssertRCReturn(rc, rc);
5507 }
5508 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_TR;
5509 }
5510 return rc;
5511}
5512
5513#undef VMXLOCAL_READ_SEG
5514
5515
5516/**
5517 * Saves the guest debug registers from the current VMCS into the guest-CPU
5518 * context.
5519 *
5520 * @returns VBox status code.
5521 * @param pVCpu Pointer to the VMCPU.
5522 * @param pMixedCtx   Pointer to the guest-CPU context. The data may be
5523 * out-of-sync. Make sure to update the required fields
5524 * before using them.
5525 *
5526 * @remarks No-long-jump zone!!!
5527 */
5528static int hmR0VmxSaveGuestDebugRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5529{
5530 int rc = VINF_SUCCESS;
5531 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_DEBUG))
5532 {
5533 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
5534 uint32_t u32Val;
5535 rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val); AssertRCReturn(rc, rc);
5536 pMixedCtx->dr[7] = u32Val;
5537
5538 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_DEBUG;
5539 }
5540 return rc;
5541}
5542
5543
5544/**
5545 * Saves the guest APIC state from the current VMCS into the guest-CPU context.
5546 *
5547 * @returns VBox status code.
5548 * @param pVCpu Pointer to the VMCPU.
5549 * @param pMixedCtx   Pointer to the guest-CPU context. The data may be
5550 * out-of-sync. Make sure to update the required fields
5551 * before using them.
5552 *
5553 * @remarks No-long-jump zone!!!
5554 */
5555static int hmR0VmxSaveGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5556{
5557 /* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. */
5558 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_APIC_STATE;
5559 return VINF_SUCCESS;
5560}
5561
5562
5563/**
5564 * Saves the entire guest state from the currently active VMCS into the
5565 * guest-CPU context. This essentially VMREADs all guest data.
5566 *
5567 * @returns VBox status code.
5568 * @param pVCpu Pointer to the VMCPU.
5569 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5570 * out-of-sync. Make sure to update the required fields
5571 * before using them.
5572 */
5573static int hmR0VmxSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5574{
5575 Assert(pVCpu);
5576 Assert(pMixedCtx);
5577
5578 if (pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL)
5579 return VINF_SUCCESS;
5580
5581 /* Though we can longjmp to ring-3 due to log-flushes here and get recalled again on the ring-3 callback path,
5582 there is no real need to. */
5583 if (VMMRZCallRing3IsEnabled(pVCpu))
5584 VMMR0LogFlushDisable(pVCpu);
5585 else
5586 Assert(VMMR0IsLogFlushDisabled(pVCpu));
5587 Log4Func(("vcpu[%RU32]\n", pVCpu->idCpu));
5588
5589 int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
5590 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestRipRspRflags failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5591
5592 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
5593 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5594
5595 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
5596 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5597
5598 rc = hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
5599 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestTableRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5600
5601 rc = hmR0VmxSaveGuestDebugRegs(pVCpu, pMixedCtx);
5602 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDebugRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5603
5604 rc = hmR0VmxSaveGuestSysenterMsrs(pVCpu, pMixedCtx);
5605 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5606
5607 rc = hmR0VmxSaveGuestFSBaseMsr(pVCpu, pMixedCtx);
5608 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestFSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5609
5610 rc = hmR0VmxSaveGuestGSBaseMsr(pVCpu, pMixedCtx);
5611 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestGSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5612
5613 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
5614 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5615
5616 rc = hmR0VmxSaveGuestActivityState(pVCpu, pMixedCtx);
5617 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5618
5619 rc = hmR0VmxSaveGuestApicState(pVCpu, pMixedCtx);
5620    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestApicState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5621
5622 AssertMsg(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL,
5623 ("Missed guest state bits while saving state; residue %RX32\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
5624
5625 if (VMMRZCallRing3IsEnabled(pVCpu))
5626 VMMR0LogFlushEnable(pVCpu);
5627
5628 return rc;
5629}
5630
5631
5632/**
5633 * Checks per-VM and per-VCPU force flag actions that require us to go back to
5634 * ring-3 for one reason or another.
5635 *
5636 * @returns VBox status code (information status code included).
5637 * @retval VINF_SUCCESS if we don't have any actions that require going back to
5638 * ring-3.
5639 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
5640 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
5641 * interrupts)
5642 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
5643 * all EMTs to be in ring-3.
5644 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
5645 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
5646 * to the EM loop.
5647 *
5648 * @param pVM Pointer to the VM.
5649 * @param pVCpu Pointer to the VMCPU.
5650 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5651 * out-of-sync. Make sure to update the required fields
5652 * before using them.
5653 */
5654static int hmR0VmxCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5655{
5656 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5657
5658 int rc = VERR_INTERNAL_ERROR_5;
5659 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
5660 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
5661 | VMCPU_FF_REQUEST | VMCPU_FF_HM_UPDATE_CR3 | VMCPU_FF_HM_UPDATE_PAE_PDPES))
5662 {
5663 /* We need the control registers now, make sure the guest-CPU context is updated. */
5664 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
5665 AssertRCReturn(rc, rc);
5666
5667 /* Pending HM CR3 sync. */
5668 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
5669 {
5670 rc = PGMUpdateCR3(pVCpu, pMixedCtx->cr3);
5671 Assert(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3);
5672 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
5673 }
5674
5675 /* Pending HM PAE PDPEs. */
5676 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
5677 {
5678 rc = PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
5679 AssertRC(rc);
5680 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
5681 }
5682
5683        /* Pending PGM CR3 sync. */
5684        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
5685 {
5686 rc = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
5687 if (rc != VINF_SUCCESS)
5688 {
5689 AssertRC(rc);
5690 Log4(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
5691 return rc;
5692 }
5693 }
5694
5695 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
5696 /* -XXX- what was that about single stepping? */
5697 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
5698 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
5699 {
5700 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
5701 rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
5702 Log4(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
5703 return rc;
5704 }
5705
5706 /* Pending VM request packets, such as hardware interrupts. */
5707 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
5708 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
5709 {
5710 Log4(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
5711 return VINF_EM_PENDING_REQUEST;
5712 }
5713
5714 /* Pending PGM pool flushes. */
5715 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
5716 {
5717 Log4(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
5718 return VINF_PGM_POOL_FLUSH_PENDING;
5719 }
5720
5721 /* Pending DMA requests. */
5722 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
5723 {
5724 Log4(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
5725 return VINF_EM_RAW_TO_R3;
5726 }
5727 }
5728
5729 /* Paranoia. */
5730 Assert(rc != VERR_EM_INTERPRETER);
5731 return VINF_SUCCESS;
5732}
5733
5734
5735/**
5736 * Converts any TRPM trap into a pending HM event. This is typically used when
5737 * entering from ring-3 (not longjmp returns).
5738 *
5739 * @param pVCpu Pointer to the VMCPU.
5740 */
5741static void hmR0VmxTrpmTrapToPendingEvent(PVMCPU pVCpu)
5742{
5743 Assert(TRPMHasTrap(pVCpu));
5744 Assert(!pVCpu->hm.s.Event.fPending);
5745
5746 uint8_t uVector;
5747 TRPMEVENT enmTrpmEvent;
5748 RTGCUINT uErrCode;
5749 RTGCUINTPTR GCPtrFaultAddress;
5750 uint8_t cbInstr;
5751
5752 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
5753 AssertRC(rc);
5754
5755 /* Refer Intel spec. 24.8.3 "VM-entry Controls for Event Injection" for the format of u32IntrInfo. */
5756 uint32_t u32IntrInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
5757 if (enmTrpmEvent == TRPM_TRAP)
5758 {
5759 switch (uVector)
5760 {
5761 case X86_XCPT_BP:
5762 case X86_XCPT_OF:
5763 {
5764 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5765 break;
5766 }
5767
5768 case X86_XCPT_PF:
5769 case X86_XCPT_DF:
5770 case X86_XCPT_TS:
5771 case X86_XCPT_NP:
5772 case X86_XCPT_SS:
5773 case X86_XCPT_GP:
5774 case X86_XCPT_AC:
5775 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
5776 /* no break! */
5777 default:
5778 {
5779 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5780 break;
5781 }
5782 }
5783 }
5784 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
5785 {
5786 if (uVector == X86_XCPT_NMI)
5787 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5788 else
5789 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5790 }
5791 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
5792 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5793 else
5794 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
5795
5796 rc = TRPMResetTrap(pVCpu);
5797 AssertRC(rc);
5798 Log4(("TRPM->HM event: u32IntrInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
5799 u32IntrInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
5800 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, uErrCode, GCPtrFaultAddress);
5801}
5802
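/*
 * Illustrative sketch (not part of the build): how the 32-bit interruption-information
 * word assembled by hmR0VmxTrpmTrapToPendingEvent() above is encoded, per Intel spec.
 * 24.8.3 "VM-Entry Controls for Event Injection", for the hardware-exception case. The
 * helper name hmR0VmxExampleMakeIntrInfo is hypothetical and exists only in this sketch.
 */
#if 0
static uint32_t hmR0VmxExampleMakeIntrInfo(uint8_t uVector, bool fErrCodeValid)
{
    uint32_t u32IntrInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;   /* Bits 7:0 = vector, bit 31 = valid. */
    u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); /* Bits 10:8 = type. */
    if (fErrCodeValid)
        u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;      /* Bit 11 = deliver error code. */
    return u32IntrInfo;
}
#endif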
5803
5804/**
5805 * Converts any pending HM event into a TRPM trap. Typically used when leaving
5806 * VT-x to execute any instruction.
5807 *
5808 * @param pVCpu Pointer to the VMCPU.
5809 */
5810static void hmR0VmxPendingEventToTrpmTrap(PVMCPU pVCpu)
5811{
5812 Assert(pVCpu->hm.s.Event.fPending);
5813
5814 uint32_t uVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo);
5815 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVCpu->hm.s.Event.u64IntrInfo);
5816 bool fErrorCodeValid = !!VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntrInfo);
5817 uint32_t uErrorCode = pVCpu->hm.s.Event.u32ErrCode;
5818
5819 /* If a trap was already pending, we did something wrong! */
5820 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
5821
5822 TRPMEVENT enmTrapType;
5823 switch (uVectorType)
5824 {
5825 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
5826 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
5827 enmTrapType = TRPM_HARDWARE_INT;
5828 break;
5829 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
5830 enmTrapType = TRPM_SOFTWARE_INT;
5831 break;
5832 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
5833 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP and #OF */
5834 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
5835 enmTrapType = TRPM_TRAP;
5836 break;
5837 default:
5838 AssertMsgFailed(("Invalid trap type %#x\n", uVectorType));
5839 enmTrapType = TRPM_32BIT_HACK;
5840 break;
5841 }
5842
5843 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
5844
5845 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
5846 AssertRC(rc);
5847
5848 if (fErrorCodeValid)
5849 TRPMSetErrorCode(pVCpu, uErrorCode);
5850
5851 if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5852 && uVector == X86_XCPT_PF)
5853 {
5854 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
5855 }
5856 else if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5857 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5858 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
5859 {
5860 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5861 || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
5862 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
5863 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
5864 }
5865 pVCpu->hm.s.Event.fPending = false;
5866}
5867
5868
5869/**
5870 * Does the necessary state syncing before doing a longjmp to ring-3.
5871 *
5872 * @param pVM Pointer to the VM.
5873 * @param pVCpu Pointer to the VMCPU.
5874 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5875 * out-of-sync. Make sure to update the required fields
5876 * before using them.
5877 * @param rcExit The reason for exiting to ring-3. Can be
5878 * VINF_VMM_UNKNOWN_RING3_CALL.
5879 *
5880 * @remarks No-long-jmp zone!!!
5881 */
5882static void hmR0VmxLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)
5883{
5884 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
5885 Assert(VMMR0IsLogFlushDisabled(pVCpu));
5886
5887 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
5888 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL);
5889 AssertRC(rc);
5890
5891 /* Restore host FPU state if necessary and resync on next R0 reentry. */
5892 if (CPUMIsGuestFPUStateActive(pVCpu))
5893 {
5894 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
5895 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
5896 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
5897 }
5898
5899 /* Restore host debug registers if necessary and resync on next R0 reentry. */
5900 if (CPUMIsGuestDebugStateActive(pVCpu))
5901 {
5902 CPUMR0SaveGuestDebugState(pVM, pVCpu, pMixedCtx, true /* save DR6 */);
5903 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
5904 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
5905 }
5906 else if (CPUMIsHyperDebugStateActive(pVCpu))
5907 {
5908 CPUMR0LoadHostDebugState(pVM, pVCpu);
5909 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
5910 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
5911 }
5912
5913 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
5914 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
5915 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
5916 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
5917 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
5918 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
5919 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
5920 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
5921 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
5922}
5923
5924
5925/**
5926 * An action requires us to go back to ring-3. This function does the necessary
5927 * steps before we can safely return to ring-3. This is not the same as a longjmp
5928 * to ring-3; this exit is voluntary.
5929 *
5930 * @param pVM Pointer to the VM.
5931 * @param pVCpu Pointer to the VMCPU.
5932 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5933 * out-of-sync. Make sure to update the required fields
5934 * before using them.
5935 * @param rcExit The reason for exiting to ring-3. Can be
5936 * VINF_VMM_UNKNOWN_RING3_CALL.
5937 */
5938static void hmR0VmxExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)
5939{
5940 Assert(pVM);
5941 Assert(pVCpu);
5942 Assert(pMixedCtx);
5943 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
5944
5945 if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_GUEST_STATE))
5946 {
5947 /* We want to see what the guest-state was before VM-entry; don't resync here as we won't continue guest execution. */
5948 return;
5949 }
5950 else if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
5951 {
5952 VMXGetActivateVMCS(&pVCpu->hm.s.vmx.lasterror.u64VMCSPhys);
5953 pVCpu->hm.s.vmx.lasterror.u32VMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs;
5954 pVCpu->hm.s.vmx.lasterror.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
5955 pVCpu->hm.s.vmx.lasterror.idCurrentCpu = RTMpCpuId();
5956 return;
5957 }
5958
5959 /* Please, no longjumps here (any logging flush must not longjmp back to ring-3). NO LOGGING BEFORE THIS POINT! */
5960 VMMRZCallRing3Disable(pVCpu);
5961 Log4(("hmR0VmxExitToRing3: pVCpu=%p idCpu=%RU32 rcExit=%d\n", pVCpu, pVCpu->idCpu, rcExit));
5962
5963 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
5964 if (pVCpu->hm.s.Event.fPending)
5965 {
5966 hmR0VmxPendingEventToTrpmTrap(pVCpu);
5967 Assert(!pVCpu->hm.s.Event.fPending);
5968 }
5969
5970 /* Sync the guest state. */
5971 hmR0VmxLongJmpToRing3(pVM, pVCpu, pMixedCtx, rcExit);
5972 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
5973
5974 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
5975 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
5976 | CPUM_CHANGED_LDTR
5977 | CPUM_CHANGED_GDTR
5978 | CPUM_CHANGED_IDTR
5979 | CPUM_CHANGED_TR
5980 | CPUM_CHANGED_HIDDEN_SEL_REGS);
5981
5982 /* On our way back from ring-3 the following needs to be done. */
5983 /** @todo This can change with preemption hooks. */
5984 if (rcExit == VINF_EM_RAW_INTERRUPT)
5985 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT;
5986 else
5987 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;
5988
5989 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
5990 VMMRZCallRing3Enable(pVCpu);
5991}
5992
5993
5994/**
5995 * VMMRZCallRing3() callback wrapper which saves the guest state before we
5996 * longjump to ring-3 and possibly get preempted.
5997 *
5998 * @param pVCpu Pointer to the VMCPU.
5999 * @param enmOperation The operation causing the ring-3 longjump.
6000 * @param pvUser The user argument (pointer to the possibly
6001 * out-of-date guest-CPU context).
6002 *
6003 * @remarks Must never be called with @a enmOperation ==
6004 * VMMCALLRING3_VM_R0_ASSERTION.
6005 */
6006DECLCALLBACK(void) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
6007{
6008 /* VMMRZCallRing3() already makes sure we never get called as a result of an longjmp due to an assertion. */
6009 Assert(pVCpu);
6010 Assert(pvUser);
6011 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6012 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6013
6014 VMMRZCallRing3Disable(pVCpu);
6015 Assert(VMMR0IsLogFlushDisabled(pVCpu));
6016 Log4(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3 pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
6017 hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser, VINF_VMM_UNKNOWN_RING3_CALL);
6018 VMMRZCallRing3Enable(pVCpu);
6019}
6020
6021
6022/**
6023 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
6024 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
6025 *
6026 * @param pVCpu Pointer to the VMCPU.
6027 */
6028DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
6029{
6030 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
6031 {
6032 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
6033 {
6034 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
6035 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
6036 AssertRC(rc);
6037 }
6038 } /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. */
6039}
6040
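/*
 * Illustrative sketch (not part of the build): the inverse of hmR0VmxSetIntWindowExitVmcs()
 * above, i.e. clearing the interrupt-window exiting control again once the corresponding
 * VM-exit has been taken. In this file the clearing is presumably done in the
 * interrupt-window VM-exit handler; the helper name below is hypothetical.
 */
#if 0
DECLINLINE(void) hmR0VmxExampleClearIntWindowExitVmcs(PVMCPU pVCpu)
{
    if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT)
    {
        pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
        AssertRC(rc);
    }
}
#endif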
6041
6042/**
6043 * Injects any pending events into the guest if the guest is in a state to
6044 * receive them.
6045 *
6046 * @returns VBox status code (informational status codes included).
6047 * @param pVCpu Pointer to the VMCPU.
6048 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6049 * out-of-sync. Make sure to update the required fields
6050 * before using them.
6051 */
6052static int hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6053{
6054 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
6055 uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
6056 bool fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
6057 bool fBlockSti = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
6058
6059 Assert(!fBlockSti || (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS));
6060 Assert( !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI) /* We don't support block-by-NMI and SMI yet.*/
6061 && !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));
6062 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
6063 Assert(!TRPMHasTrap(pVCpu));
6064
6065 int rc = VINF_SUCCESS;
6066 if (pVCpu->hm.s.Event.fPending) /* First, inject any pending HM events. */
6067 {
6068 uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo);
6069 bool fInject = true;
6070 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
6071 {
6072 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
6073 AssertRCReturn(rc, rc);
6074 const bool fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
6075 if ( fBlockInt
6076 || fBlockSti
6077 || fBlockMovSS)
6078 {
6079 fInject = false;
6080 }
6081 }
6082 else if ( uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
6083 && ( fBlockMovSS
6084 || fBlockSti))
6085 {
6086 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
6087 fInject = false;
6088 }
6089
6090 if (fInject)
6091 {
6092 Log4(("Injecting pending event vcpu[%RU32]\n", pVCpu->idCpu));
6093 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.cbInstr,
6094 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, &uIntrState);
6095 AssertRCReturn(rc, rc);
6096 pVCpu->hm.s.Event.fPending = false;
6097 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntReinject);
6098 }
6099 else
6100 hmR0VmxSetIntWindowExitVmcs(pVCpu);
6101 } /** @todo SMI. SMIs take priority over NMIs. */
6102 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
6103 {
6104 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
6105 if ( !fBlockMovSS
6106 && !fBlockSti)
6107 {
6108 Log4(("Injecting NMI\n"));
6109 uint32_t u32IntrInfo = X86_XCPT_NMI | VMX_EXIT_INTERRUPTION_INFO_VALID;
6110 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6111 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */,
6112 0 /* GCPtrFaultAddress */, &uIntrState);
6113 AssertRCReturn(rc, rc);
6114 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
6115 }
6116 else
6117 hmR0VmxSetIntWindowExitVmcs(pVCpu);
6118 }
6119 else if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
6120 {
6121 /* Check if there are guest external interrupts (PIC/APIC) pending and inject them if the guest can receive them. */
6122 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
6123 AssertRCReturn(rc, rc);
6124 const bool fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
6125 if ( !fBlockInt
6126 && !fBlockSti
6127 && !fBlockMovSS)
6128 {
6129 uint8_t u8Interrupt;
6130 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
6131 if (RT_SUCCESS(rc))
6132 {
6133 Log4(("Injecting interrupt u8Interrupt=%#x\n", u8Interrupt));
6134 uint32_t u32IntrInfo = u8Interrupt | VMX_EXIT_INTERRUPTION_INFO_VALID;
6135 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6136 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */,
6137 0 /* GCPtrFaultAddress */, &uIntrState);
6138 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
6139 }
6140 else
6141 {
6142 /** @todo Does this actually happen? If not turn it into an assertion. */
6143 Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
6144 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
6145 rc = VINF_SUCCESS;
6146 }
6147 }
6148 else
6149 hmR0VmxSetIntWindowExitVmcs(pVCpu);
6150 }
6151
6152 /*
6153 * Deliver a pending debug exception if the guest is single-stepping. The interruptibility-state could have been changed by
6154 * hmR0VmxInjectEventVmcs() (e.g. real-on-v86 injecting software interrupts); re-evaluate it and set the BS bit.
6155 */
6156 fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
6157 fBlockSti = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
6158 int rc2 = VINF_SUCCESS;
6159 if ( fBlockSti
6160 || fBlockMovSS)
6161 {
6162 if (!DBGFIsStepping(pVCpu))
6163 {
6164 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS);
6165 if (pMixedCtx->eflags.Bits.u1TF) /* We don't have any IA32_DEBUGCTL MSR for guests. Treat as all bits 0. */
6166 {
6167 /*
6168 * The pending-debug exceptions field is cleared on all VM-exits except VMX_EXIT_TPR_BELOW_THRESHOLD, VMX_EXIT_MTF,
6169 * VMX_EXIT_APIC_WRITE and VMX_EXIT_VIRTUALIZED_EOI. See Intel spec. 27.3.4 "Saving Non-Register State".
6170 */
6171 rc2 = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS);
6172 AssertRCReturn(rc2, rc2);
6173 }
6174 }
6175 else
6176 {
6177 /* We are single-stepping in the hypervisor debugger, clear interrupt inhibition as setting the BS bit would mean
6178 delivering a #DB to the guest upon VM-entry when it shouldn't be. */
6179 uIntrState = 0;
6180 }
6181 }
6182
6183 /*
6184 * There's no need to clear the VM entry-interruption information field here if we're not injecting anything.
6185 * VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
6186 */
6187 rc2 = hmR0VmxLoadGuestIntrState(pVCpu, uIntrState);
6188 AssertRC(rc2);
6189
6190 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
6191 return rc;
6192}
6193
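/*
 * Illustrative sketch (not part of the build): the guest interruptibility-state bit
 * positions defined by Intel spec. 24.4.2 "Guest Non-Register State", which the
 * VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_* masks used by hmR0VmxInjectPendingEvent()
 * above are assumed to correspond to. The g_fExample* names are hypothetical.
 */
#if 0
static const uint32_t g_fExampleBlockSti   = RT_BIT(0);  /* Blocking by STI. */
static const uint32_t g_fExampleBlockMovSS = RT_BIT(1);  /* Blocking by MOV SS / POP SS. */
static const uint32_t g_fExampleBlockSmi   = RT_BIT(2);  /* Blocking by SMI. */
static const uint32_t g_fExampleBlockNmi   = RT_BIT(3);  /* Blocking by NMI. */
#endif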
6194
6195/**
6196 * Sets an invalid-opcode (#UD) exception as pending-for-injection into the VM.
6197 *
6198 * @param pVCpu Pointer to the VMCPU.
6199 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6200 * out-of-sync. Make sure to update the required fields
6201 * before using them.
6202 */
6203DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6204{
6205 uint32_t u32IntrInfo = X86_XCPT_UD | VMX_EXIT_INTERRUPTION_INFO_VALID;
6206 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6207}
6208
6209
6210/**
6211 * Injects a double-fault (#DF) exception into the VM.
6212 *
6213 * @returns VBox status code (informational status code included).
6214 * @param pVCpu Pointer to the VMCPU.
6215 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6216 * out-of-sync. Make sure to update the required fields before using them.
6217 * @param puIntrState Pointer to the current guest interruptibility-state; updated if necessary.
6218 */
6219DECLINLINE(int) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t *puIntrState)
6220{
6221 uint32_t u32IntrInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
6222 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6223 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6224 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */,
6225 puIntrState);
6226}
6227
6228
6229/**
6230 * Sets a debug (#DB) exception as pending-for-injection into the VM.
6231 *
6232 * @param pVCpu Pointer to the VMCPU.
6233 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6234 * out-of-sync. Make sure to update the required fields
6235 * before using them.
6236 */
6237DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6238{
6239 uint32_t u32IntrInfo = X86_XCPT_DB | VMX_EXIT_INTERRUPTION_INFO_VALID;
6240 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6241 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6242}
6243
6244
6245/**
6246 * Sets an overflow (#OF) exception as pending-for-injection into the VM.
6247 *
6248 * @param pVCpu Pointer to the VMCPU.
6249 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6250 * out-of-sync. Make sure to update the required fields
6251 * before using them.
6252 * @param cbInstr The instruction length in bytes (so that the return RIP
6253 * pushed on the guest stack points past the INTO instruction).
6254 */
6255DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
6256{
6257 uint32_t u32IntrInfo = X86_XCPT_OF | VMX_EXIT_INTERRUPTION_INFO_VALID;
6258 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6259 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6260}
6261
6262
6263/**
6264 * Injects a general-protection (#GP) fault into the VM.
6265 *
6266 * @returns VBox status code (informational status code included).
6267 * @param pVCpu Pointer to the VMCPU.
6268 * @param pMixedCtx Pointer to the guest-CPU context; may be out-of-sync, update required fields before use.
6269 * @param fErrorCodeValid Whether the error code is valid for this #GP.
6270 * @param u32ErrorCode The error code associated with the #GP.
6271 * @param puIntrState Pointer to the current guest interruptibility-state; updated if necessary.
6272 */
6273DECLINLINE(int) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
6274 uint32_t *puIntrState)
6275{
6276 uint32_t u32IntrInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
6277 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6278 if (fErrorCodeValid)
6279 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6280 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */,
6281 puIntrState);
6282}
6283
6284
6285/**
6286 * Sets a software interrupt (INTn) as pending-for-injection into the VM.
6287 *
6288 * @param pVCpu Pointer to the VMCPU.
6289 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6290 * out-of-sync. Make sure to update the required fields
6291 * before using them.
6292 * @param uVector The software interrupt vector number.
6293 * @param cbInstr The instruction length in bytes (so that the return RIP
6294 * pushed on the guest stack points past the INTn instruction).
6295 */
6296DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr)
6297{
6298 uint32_t u32IntrInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
6299 if ( uVector == X86_XCPT_BP
6300 || uVector == X86_XCPT_OF)
6301 {
6302 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6303 }
6304 else
6305 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6306 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6307}
6308
6309
6310/**
6311 * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
6312 * stack.
6313 *
6314 * @returns VBox status code (information status code included).
6315 * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
6316 * @param pVM Pointer to the VM.
6317 * @param pMixedCtx Pointer to the guest-CPU context.
6318 * @param uValue The value to push to the guest stack.
6319 */
6320DECLINLINE(int) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
6321{
6322 /*
6323 * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
6324 * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
6325 * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
6326 */
6327 if (pMixedCtx->sp == 1)
6328 return VINF_EM_RESET;
6329 pMixedCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */
6330 int rc = PGMPhysSimpleWriteGCPhys(pVM, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));
6331 AssertRCReturn(rc, rc);
6332 return rc;
6333}
6334
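/*
 * Illustrative sketch (not part of the build): the 16-bit stack-pointer wraparound that
 * hmR0VmxRealModeGuestStackPush() above relies on. With sp == 0, subtracting
 * sizeof(uint16_t) wraps to 0xfffe and the word lands at ss.base + 0xfffe; the sp == 1
 * case is instead treated as a triple-fault (VINF_EM_RESET), see the function above.
 */
#if 0
uint16_t uExampleSp = 0;                /* Hypothetical starting stack pointer. */
uExampleSp -= sizeof(uint16_t);         /* uExampleSp == 0xfffe after unsigned wraparound. */
#endif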
6335
6336/**
6337 * Injects an event into the guest upon VM-entry by updating the relevant fields
6338 * in the VM-entry area in the VMCS.
6339 *
6340 * @returns VBox status code (informational error codes included).
6341 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
6342 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
6343 *
6344 * @param pVCpu Pointer to the VMCPU.
6345 * @param pMixedCtx Pointer to the guest-CPU context. The data may
6346 * be out-of-sync. Make sure to update the required
6347 * fields before using them.
6348 * @param u64IntrInfo The VM-entry interruption-information field.
6349 * @param cbInstr The VM-entry instruction length in bytes (for
6350 * software interrupts, exceptions and privileged
6351 * software exceptions).
6352 * @param u32ErrCode The VM-entry exception error code.
6353 * @param GCPtrFaultAddress The page-fault address for #PF exceptions.
6354 * @param puIntrState Pointer to the current guest interruptibility-state.
6355 * This interruptibility-state will be updated if
6356 * necessary. This must not be NULL.
6357 *
6358 * @remarks No-long-jump zone!!!
6359 * @remarks Requires CR0!
6360 */
6361static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
6362 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntrState)
6363{
6364 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
6365 AssertMsg(u64IntrInfo >> 32 == 0, ("%#RX64\n", u64IntrInfo));
6366 Assert(puIntrState);
6367 uint32_t u32IntrInfo = (uint32_t)u64IntrInfo;
6368
6369 const uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntrInfo);
6370 const uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo);
6371
6372 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
6373 Assert( uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
6374 || !(*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));
6375
6376 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
6377
6378 /* We require CR0 to check if the guest is in real-mode. */
6379 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6380 AssertRCReturn(rc, rc);
6381
6382 /*
6383 * Hardware interrupts & exceptions cannot be delivered through the software interrupt redirection bitmap to the real
6384 * mode task in virtual-8086 mode. We must jump to the interrupt handler in the (real-mode) guest.
6385 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode" for interrupt & exception classes.
6386 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
6387 */
6388 if (CPUMIsGuestInRealModeEx(pMixedCtx))
6389 {
6390 PVM pVM = pVCpu->CTX_SUFF(pVM);
6391 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
6392 {
6393 Assert(PDMVmmDevHeapIsEnabled(pVM));
6394 Assert(pVM->hm.s.vmx.pRealModeTSS);
6395
6396 /* We require RIP, RSP, RFLAGS, CS, IDTR. Save the required ones from the VMCS. */
6397 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
6398 rc |= hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
6399 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
6400 AssertRCReturn(rc, rc);
6401 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP);
6402
6403 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
6404 const size_t cbIdtEntry = 4;
6405 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pMixedCtx->idtr.cbIdt)
6406 {
6407 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
6408 if (uVector == X86_XCPT_DF)
6409 return VINF_EM_RESET;
6410 else if (uVector == X86_XCPT_GP)
6411 {
6412 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
6413 return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, puIntrState);
6414 }
6415
6416 /* If we're injecting an interrupt/exception with no valid IDT entry, inject a general-protection fault. */
6417 /* No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling". */
6418 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */, puIntrState);
6419 }
6420
6421 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
6422 uint16_t uGuestIp = pMixedCtx->ip;
6423 if (VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT)
6424 {
6425 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
6426 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
6427 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
6428 }
6429 else if (VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT)
6430 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
6431
6432 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
6433 uint16_t offIdtEntry = 0;
6434 RTSEL selIdtEntry = 0;
6435 RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pMixedCtx->idtr.pIdt + uVector * cbIdtEntry;
6436 rc = PGMPhysSimpleReadGCPhys(pVM, &offIdtEntry, GCPhysIdtEntry, sizeof(offIdtEntry));
6437 rc |= PGMPhysSimpleReadGCPhys(pVM, &selIdtEntry, GCPhysIdtEntry + 2, sizeof(selIdtEntry));
6438 AssertRCReturn(rc, rc);
6439
6440 /* Construct the stack frame for the interrupt/exception handler. */
6441 rc = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
6442 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
6443 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, uGuestIp);
6444 AssertRCReturn(rc, rc);
6445
6446 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
6447 if (rc == VINF_SUCCESS)
6448 {
6449 pMixedCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
6450 pMixedCtx->rip = offIdtEntry;
6451 pMixedCtx->cs.Sel = selIdtEntry;
6452 pMixedCtx->cs.u64Base = selIdtEntry << cbIdtEntry; /* Real-mode segment base = selector << 4; cbIdtEntry happens to be 4. */
6453 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
6454 && uVector == X86_XCPT_PF)
6455 {
6456 pMixedCtx->cr2 = GCPtrFaultAddress;
6457 }
6458 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS
6459 | HM_CHANGED_GUEST_RIP
6460 | HM_CHANGED_GUEST_RFLAGS
6461 | HM_CHANGED_GUEST_RSP;
6462
6463 /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */
6464 if (*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
6465 {
6466 Assert( uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
6467 && uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
6468 Log4(("Clearing inhibition due to STI.\n"));
6469 *puIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
6470 }
6471 Log4(("Injecting real-mode: u32IntrInfo=%#x u32ErrCode=%#x instrlen=%#x\n", u32IntrInfo, u32ErrCode, cbInstr));
6472 }
6473 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
6474 return rc;
6475 }
6476 else
6477 {
6478 /*
6479 * When running real-mode guests with unrestricted guest execution enabled, we must not set the deliver-error-code bit.
6480 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
6481 */
6482 u32IntrInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6483 }
6484 }
6485
6486 /* Validate. */
6487 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(u32IntrInfo)); /* Bit 31 (Valid bit) must be set by caller. */
6488 Assert(!VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK(u32IntrInfo)); /* Bit 12 MBZ. */
6489 Assert(!(u32IntrInfo & 0x7ffff000)); /* Bits 30:12 MBZ. */
6490
6491 /* Inject. */
6492 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntrInfo);
6493 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntrInfo))
6494 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
6495 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
6496
6497 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
6498 && uVector == X86_XCPT_PF)
6499 {
6500 pMixedCtx->cr2 = GCPtrFaultAddress;
6501 }
6502
6503 Log4(("Injecting vcpu[%RU32] u32IntrInfo=%#x u32ErrCode=%#x cbInstr=%#x pMixedCtx->uCR2=%#RX64\n", pVCpu->idCpu,
6504 u32IntrInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));
6505
6506 AssertRCReturn(rc, rc);
6507 return rc;
6508}
6509
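/*
 * Illustrative sketch (not part of the build): the 4-byte real-mode IVT entry layout that
 * hmR0VmxInjectEventVmcs() above reads with the two PGMPhysSimpleReadGCPhys() calls
 * (offset word first, then the code-segment selector). The struct name is hypothetical.
 */
#if 0
#pragma pack(1)
typedef struct HMVMXEXAMPLEIVTENTRY
{
    uint16_t offHandler;    /* Bytes 0-1: IP of the interrupt/exception handler. */
    uint16_t selHandler;    /* Bytes 2-3: real-mode CS of the handler. */
} HMVMXEXAMPLEIVTENTRY;
#pragma pack()
AssertCompileSize(HMVMXEXAMPLEIVTENTRY, 4);
#endif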
6510
6511/**
6512 * Enters the VT-x session.
6513 *
6514 * @returns VBox status code.
6515 * @param pVM Pointer to the VM.
6516 * @param pVCpu Pointer to the VMCPU.
6517 * @param pCpu Pointer to the CPU info struct.
6518 */
6519VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)
6520{
6521 AssertPtr(pVM);
6522 AssertPtr(pVCpu);
6523 Assert(pVM->hm.s.vmx.fSupported);
6524 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6525 NOREF(pCpu);
6526
6527 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
6528
6529 /* Make sure we're in VMX root mode. */
6530 RTCCUINTREG u32HostCR4 = ASMGetCR4();
6531 if (!(u32HostCR4 & X86_CR4_VMXE))
6532 {
6533 LogRel(("VMXR0Enter: X86_CR4_VMXE bit in CR4 is not set!\n"));
6534 return VERR_VMX_X86_CR4_VMXE_CLEARED;
6535 }
6536
6537 /* Load the active VMCS as the current one. */
6538 int rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
6539 if (RT_FAILURE(rc))
6540 return rc;
6541
6542 /** @todo this will change with preemption hooks where we can VMRESUME as long
6543 * as we're not preempted. */
6544 pVCpu->hm.s.fResumeVM = false;
6545 return VINF_SUCCESS;
6546}
6547
6548
6549/**
6550 * Leaves the VT-x session.
6551 *
6552 * @returns VBox status code.
6553 * @param pVM Pointer to the VM.
6554 * @param pVCpu Pointer to the VMCPU.
6555 * @param pCtx Pointer to the guest-CPU context.
6556 */
6557VMMR0DECL(int) VMXR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
6558{
6559 AssertPtr(pVCpu);
6560 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6561 NOREF(pVM);
6562 NOREF(pCtx);
6563
6564 /** @todo this will change with preemption hooks where we only VMCLEAR when
6565 * we are actually going to be preempted, not all the time like we
6566 * currently do. */
6567
6568 /* Restore host-state bits that VT-x only restores partially. */
6569 if (pVCpu->hm.s.vmx.fRestoreHostFlags)
6570 {
6571#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
6572 /** @todo r=ramshankar: This is broken when
6573 * VBOX_WITH_VMMR0_DISABLE_PREEMPTION is not defined, as
6574 * VMXRestoreHostState() may unconditionally enable interrupts. */
6575#error "VMM: Fix Me! Make VMXRestoreHostState() function to skip cli/sti."
6576#else
6577 Assert(ASMIntAreEnabled());
6578 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
6579#endif
6580 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
6581 }
6582
6583 /*
6584 * Sync the current VMCS (writes back internal data back into the VMCS region in memory)
6585 * and mark the VMCS launch-state as "clear".
6586 */
6587 int rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
6588 return rc;
6589}
6590
6591
6592/**
6593 * Saves the host state in the VMCS host-state.
6594 * Sets up the VM-exit MSR-load area.
6595 *
6596 * The CPU state will be loaded from these fields on every successful VM-exit.
6597 *
6598 * @returns VBox status code.
6599 * @param pVM Pointer to the VM.
6600 * @param pVCpu Pointer to the VMCPU.
6601 *
6602 * @remarks No-long-jump zone!!!
6603 */
6604VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
6605{
6606 AssertPtr(pVM);
6607 AssertPtr(pVCpu);
6608 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6609
6610 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
6611
6612 /* Nothing to do if the host-state-changed flag isn't set. This will later be optimized when preemption hooks are in place. */
6613 if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT))
6614 return VINF_SUCCESS;
6615
6616 int rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
6617 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6618
6619 rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
6620 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6621
6622 rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
6623 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6624
6625 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;
6626 return rc;
6627}
6628
6629
6630/**
6631 * Loads the guest state into the VMCS guest-state area. The CPU state will be
6632 * loaded from these fields on every successful VM-entry.
6633 *
6634 * Sets up the VM-entry MSR-load and VM-exit MSR-store areas.
6635 * Sets up the VM-entry controls.
6636 * Sets up the appropriate VMX non-root function to execute guest code based on
6637 * the guest CPU mode.
6638 *
6639 * @returns VBox status code.
6640 * @param pVM Pointer to the VM.
6641 * @param pVCpu Pointer to the VMCPU.
6642 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6643 * out-of-sync. Make sure to update the required fields
6644 * before using them.
6645 *
6646 * @remarks No-long-jump zone!!!
6647 */
6648VMMR0DECL(int) VMXR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6649{
6650 AssertPtr(pVM);
6651 AssertPtr(pVCpu);
6652 AssertPtr(pMixedCtx);
6653 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6654
6655#ifdef LOG_ENABLED
6656 /** @todo r=ramshankar: I'm not able to use VMMRZCallRing3Disable() here,
6657 * probably not initialized yet? Anyway this will do for now. */
6658 bool fCallerDisabledLogFlush = VMMR0IsLogFlushDisabled(pVCpu);
6659 VMMR0LogFlushDisable(pVCpu);
6660#endif
6661
6662 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
6663
6664 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
6665
6666 /* Determine real-on-v86 mode. */
6667 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
6668 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
6669 && CPUMIsGuestInRealModeEx(pMixedCtx))
6670 {
6671 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
6672 }
6673
6674 /*
6675 * Load the guest-state into the VMCS.
6676 * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
6677 * Ideally, assert that the cross-dependent bits are up to date at the point of using it.
6678 */
6679 int rc = hmR0VmxLoadGuestEntryCtls(pVCpu, pMixedCtx);
6680 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestEntryCtls! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6681
6682 rc = hmR0VmxLoadGuestExitCtls(pVCpu, pMixedCtx);
6683 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupExitCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6684
6685 rc = hmR0VmxLoadGuestActivityState(pVCpu, pMixedCtx);
6686 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6687
6688 rc = hmR0VmxLoadGuestControlRegs(pVCpu, pMixedCtx);
6689 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestControlRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6690
6691 /* Must be done after CR0 is loaded (strict builds require CR0 for segment register validation checks). */
6692 rc = hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx);
6693 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6694
6695 rc = hmR0VmxLoadGuestDebugRegs(pVCpu, pMixedCtx);
6696 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestDebugRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6697
6698 rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
6699 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6700
6701 rc = hmR0VmxLoadGuestApicState(pVCpu, pMixedCtx);
6702 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6703
6704 /* Must be done after hmR0VmxLoadGuestDebugRegs() as it may update eflags.TF for debugging purposes. */
6705 rc = hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx);
6706 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestRipRspRflags! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6707
6708 rc = hmR0VmxSetupVMRunHandler(pVCpu, pMixedCtx);
6709 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6710
6711 /* Clear any unused and reserved bits. */
6712 pVCpu->hm.s.fContextUseFlags &= ~( HM_CHANGED_GUEST_CR2
6713 | HM_CHANGED_GUEST_MSR /* legacy */);
6714
6715 AssertMsg(!pVCpu->hm.s.fContextUseFlags,
6716 ("Missed updating flags while loading guest state. pVM=%p pVCpu=%p idCpu=%RU32 fContextUseFlags=%#RX32\n",
6717 pVM, pVCpu, pVCpu->idCpu, pVCpu->hm.s.fContextUseFlags));
6718
6719#ifdef LOG_ENABLED
6720 /* Only reenable log-flushing if the caller has it enabled. */
6721 if (!fCallerDisabledLogFlush)
6722 VMMR0LogFlushEnable(pVCpu);
6723#endif
6724
6725 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
6726 return rc;
6727}
6728
6729
6730/**
6731 * Does the preparations before executing guest code in VT-x.
6732 *
6733 * This may cause longjmps to ring-3 and may even result in rescheduling to the
6734 * recompiler. We must be cautious about what we do here regarding committing
6735 * guest-state information into the VMCS on the assumption that we will definitely
6736 * execute the guest in VT-x. If we fall back to the recompiler after updating the VMCS and
6737 * clearing the common-state (TRPM/forceflags), we must undo those changes so
6738 * that the recompiler can (and should) use them when it resumes guest
6739 * execution. Otherwise such operations must be done when we can no longer
6740 * exit to ring-3.
6741 *
6742 * @returns VBox status code (informational status codes included).
6743 * @retval VINF_SUCCESS if we can proceed with running the guest.
6744 * @retval VINF_EM_RESET if a triple-fault occurs while injecting a double-fault
6745 * into the guest.
6746 * @retval VINF_* scheduling changes, we have to go back to ring-3.
6747 *
6748 * @param pVM Pointer to the VM.
6749 * @param pVCpu Pointer to the VMCPU.
6750 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6751 * out-of-sync. Make sure to update the required fields
6752 * before using them.
6753 * @param pVmxTransient Pointer to the VMX transient structure.
6754 *
6755 * @remarks Called with preemption disabled.
6756 */
6757DECLINLINE(int) hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6758{
6759 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6760
6761#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
6762 PGMRZDynMapFlushAutoSet(pVCpu);
6763#endif
6764
6765 /* Check force flag actions that might require us to go back to ring-3. */
6766 int rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx);
6767 if (rc != VINF_SUCCESS)
6768 return rc;
6769
6770 /* Set up the virtualized APIC accesses. pMixedCtx->msrApicBase is always up-to-date. It's not part of the VMCS. */
6771 if ( pVCpu->hm.s.vmx.u64MsrApicBase != pMixedCtx->msrApicBase
6772 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
6773 {
6774 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
6775 RTGCPHYS GCPhysApicBase;
6776 GCPhysApicBase = pMixedCtx->msrApicBase;
6777 GCPhysApicBase &= PAGE_BASE_GC_MASK;
6778
6779 /* Unalias any existing mapping. */
6780 rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
6781 AssertRCReturn(rc, rc);
6782
6783 /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
6784 Log4(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGv\n", GCPhysApicBase));
6785 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
6786 AssertRCReturn(rc, rc);
6787
6788 pVCpu->hm.s.vmx.u64MsrApicBase = pMixedCtx->msrApicBase;
6789 }
6790
6791#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
6792 /* We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.) */
6793 pVmxTransient->uEFlags = ASMIntDisableFlags();
6794 if (RTThreadPreemptIsPending(NIL_RTTHREAD))
6795 {
6796 ASMSetFlags(pVmxTransient->uEFlags);
6797 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
6798 /* Don't use VINF_EM_RAW_INTERRUPT_HYPER as we can't assume the host does kernel preemption. Maybe some day? */
6799 return VINF_EM_RAW_INTERRUPT;
6800 }
6801 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
6802 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
6803#endif
6804
6805 /*
6806 * Evaluates and injects any pending events, toggling force-flags and updating the guest-interruptibility
6807 * state (interrupt shadow) in the VMCS. This -can- potentially be reworked to be done before disabling
6808 * interrupts and handle returning to ring-3 afterwards, but requires very careful state restoration.
6809 */
6810 /** @todo Rework event evaluation and injection to be completely separate. */
6811 if (TRPMHasTrap(pVCpu))
6812 hmR0VmxTrpmTrapToPendingEvent(pVCpu);
6813
6814 rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx);
6815 AssertRCReturn(rc, rc);
6816 return rc;
6817}
6818
6819
6820/**
6821 * Prepares to run guest code in VT-x and we've committed to doing so. This
6822 * means there is no backing out to ring-3 or anywhere else at this
6823 * point.
6824 *
6825 * @param pVM Pointer to the VM.
6826 * @param pVCpu Pointer to the VMCPU.
6827 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6828 * out-of-sync. Make sure to update the required fields
6829 * before using them.
6830 * @param pVmxTransient Pointer to the VMX transient structure.
6831 *
6832 * @remarks Called with preemption disabled.
6833 * @remarks No-long-jump zone!!!
6834 */
6835DECLINLINE(void) hmR0VmxPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6836{
6837 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6838 Assert(VMMR0IsLogFlushDisabled(pVCpu));
6839
6840#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
6841 /** @todo I don't see the point of this, VMMR0EntryFast() already disables interrupts for the entire period. */
6842 pVmxTransient->uEFlags = ASMIntDisableFlags();
6843 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
6844#endif
6845
6846 /* Load the required guest state bits (for guest-state changes in the inner execution loop). */
6847 Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT));
6848 Log5(("LoadFlags=%#RX32\n", pVCpu->hm.s.fContextUseFlags));
6849#ifdef HMVMX_SYNC_FULL_GUEST_STATE
6850 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
6851#endif
6852 int rc = VINF_SUCCESS;
6853 if (pVCpu->hm.s.fContextUseFlags == HM_CHANGED_GUEST_RIP)
6854 {
6855 rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
6856 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
6857 }
6858 else if (pVCpu->hm.s.fContextUseFlags)
6859 {
6860 rc = VMXR0LoadGuestState(pVM, pVCpu, pMixedCtx);
6861 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
6862 }
6863 AssertRC(rc);
6864 AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags =%#x\n", pVCpu->hm.s.fContextUseFlags));
6865
6866 /* Cache the TPR-shadow for checking on every VM-exit if it might have changed. */
6867 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
6868 pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[0x80];
6869
6870 if ( pVmxTransient->fUpdateTscOffsettingAndPreemptTimer
6871 || HMR0GetCurrentCpu()->idCpu != pVCpu->hm.s.idLastCpu)
6872 {
6873 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu, pMixedCtx);
6874 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
6875 }
6876
6877 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB-shootdowns, set this across the world switch. */
6878 hmR0VmxFlushTaggedTlb(pVCpu); /* Invalidate the appropriate guest entries from the TLB. */
6879 Assert(HMR0GetCurrentCpu()->idCpu == pVCpu->hm.s.idLastCpu);
6880
6881#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
6882 /*
6883 * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that
6884 * RDTSCPs (that don't cause exits) read the guest MSR. See @bugref{3324}.
6885 */
6886 if ( (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
6887 && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
6888 {
6889 pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
6890 uint64_t u64GuestTscAux = 0; /* The guest's TSC_AUX value. */
6891 int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTscAux);
6892 AssertRC(rc2);
6893 ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);
6894 }
6895#endif
6896
6897 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
6898 to start executing. */
6899 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
6900}
6901
6902
6903/**
6904 * Performs some essential restoration of state after running guest code in
6905 * VT-x.
6906 *
6907 * @param pVM Pointer to the VM.
6908 * @param pVCpu Pointer to the VMCPU.
6909 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6910 * out-of-sync. Make sure to update the required fields
6911 * before using them.
6912 * @param pVmxTransient Pointer to the VMX transient structure.
6913 * @param rcVMRun Return code of VMLAUNCH/VMRESUME.
6914 *
6915 * @remarks Called with interrupts disabled.
6916 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
6917 * unconditionally when it is safe to do so.
6918 */
6919DECLINLINE(void) hmR0VmxPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, int rcVMRun)
6920{
6921 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6922 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
6923
6924 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
6925 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
6926 pVCpu->hm.s.vmx.fUpdatedGuestState = 0; /* Exits/longjmps to ring-3 requires saving the guest state. */
6927 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */
6928 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
6929
6930 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
6931 {
6932#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
6933 /* Restore host's TSC_AUX. */
6934 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
6935 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
6936#endif
6937 /** @todo Find a way to fix hardcoding a guestimate. */
6938 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC()
6939 + pVCpu->hm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);
6940 }
6941
6942 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
6943 Assert(!(ASMGetFlags() & X86_EFL_IF));
6944 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
6945
6946 ASMSetFlags(pVmxTransient->uEFlags); /* Enable interrupts. */
6947 pVCpu->hm.s.fResumeVM = true; /* Use VMRESUME instead of VMLAUNCH in the next run. */
6948
6949 /* Save the basic VM-exit reason. Refer Intel spec. 24.9.1 "Basic VM-exit Information". */
6950 uint32_t uExitReason;
6951 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
6952 rc |= hmR0VmxReadEntryIntrInfoVmcs(pVmxTransient);
6953 AssertRC(rc);
6954 pVmxTransient->uExitReason = (uint16_t)VMX_EXIT_REASON_BASIC(uExitReason);
6955 pVmxTransient->fVMEntryFailed = !!VMX_ENTRY_INTERRUPTION_INFO_VALID(pVmxTransient->uEntryIntrInfo);
6956
6957 VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pMixedCtx);
6958 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
6959
6960 /* If the VMLAUNCH/VMRESUME failed, we can bail out early. This does -not- cover VMX_EXIT_ERR_*. */
6961 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
6962 {
6963 Log4(("VM-entry failure: pVCpu=%p idCpu=%RU32 rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", pVCpu, pVCpu->idCpu, rcVMRun,
6964 pVmxTransient->fVMEntryFailed));
6965 return;
6966 }
6967
6968 if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
6969 {
6970 /* Update the guest interruptibility-state from the VMCS. */
6971 hmR0VmxSaveGuestIntrState(pVCpu, pMixedCtx);
6972#if defined(HMVMX_SYNC_FULL_GUEST_STATE) || defined(HMVMX_SAVE_FULL_GUEST_STATE)
6973 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
6974 AssertRC(rc);
6975#endif
6976 /*
6977 * If the TPR was raised by the guest, it wouldn't cause a VM-exit immediately. Instead we sync the TPR lazily whenever
6978 * we eventually get a VM-exit for any reason. This may be expensive as PDMApicSetTPR() can longjmp to ring-3, which is
6979 * why it's done here as it's easier and no less efficient to deal with it here than making hmR0VmxSaveGuestState()
6980 * cope with longjmps safely (see VMCPU_FF_HM_UPDATE_CR3 handling).
6981 */
6982 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
6983 && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[0x80])
6984 {
6985 rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
6986 AssertRC(rc);
6987 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
6988 }
6989 }
6990}
6991
6992
6993/**
6994 * Runs the guest code using VT-x.
6995 *
6996 * @returns VBox status code.
6997 * @param pVM Pointer to the VM.
6998 * @param pVCpu Pointer to the VMCPU.
6999 * @param pCtx Pointer to the guest-CPU context.
7000 *
7001 * @remarks Called with preemption disabled.
7002 */
7003VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
7004{
7005 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7006 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7007
7008 VMXTRANSIENT VmxTransient;
7009 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
7010 int rc = VERR_INTERNAL_ERROR_5;
7011 uint32_t cLoops = 0;
7012
7013 for (;; cLoops++)
7014 {
7015 Assert(!HMR0SuspendPending());
7016 AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
7017 ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hm.s.idEnteredCpu,
7018 (unsigned)RTMpCpuId(), cLoops));
7019
7020 /* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
7021 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
7022 rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
7023 if (rc != VINF_SUCCESS)
7024 break;
7025
7026 /*
7027 * No longjmps to ring-3 from this point on!!!
7028 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional and better than a kernel panic.
7029 * This also disables flushing of the R0-logger instance (if any).
7030 */
7031 VMMRZCallRing3Disable(pVCpu);
7032 VMMRZCallRing3RemoveNotification(pVCpu);
7033 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
7034
7035 rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
7036 /* The guest-CPU context is now outdated; 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
7037
7038 /*
7039 * Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state.
7040 * This will also re-enable longjmps to ring-3 when it has reached a safe point!!!
7041 */
7042 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
7043 if (RT_UNLIKELY(rc != VINF_SUCCESS)) /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
7044 {
7045 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
7046 hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
7047 return rc;
7048 }
7049
7050 /* Handle the VM-exit. */
7051 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
7052 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
7053 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
7054 HMVMX_START_EXIT_DISPATCH_PROF();
7055#ifdef HMVMX_USE_FUNCTION_TABLE
7056 rc = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
7057#else
7058 rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
7059#endif
7060 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
7061 if (rc != VINF_SUCCESS)
7062 break;
7063 else if (cLoops > pVM->hm.s.cMaxResumeLoops)
7064 {
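            /* Returning VINF_EM_RAW_INTERRUPT here forces a trip back to ring-3 after a fixed number of resumes,
               giving pending ring-3 work (timers, requests, etc.) a chance to be processed. */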
7065 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
7066 rc = VINF_EM_RAW_INTERRUPT;
7067 break;
7068 }
7069 }
7070
7071 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
7072 if (rc == VERR_EM_INTERPRETER)
7073 rc = VINF_EM_RAW_EMULATE_INSTR;
7074 else if (rc == VINF_EM_RESET)
7075 rc = VINF_EM_TRIPLE_FAULT;
7076 hmR0VmxExitToRing3(pVM, pVCpu, pCtx, rc);
7077 return rc;
7078}
7079
7080
7081#ifndef HMVMX_USE_FUNCTION_TABLE
7082DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
7083{
7084 int rc;
7085 switch (rcReason)
7086 {
7087 case VMX_EXIT_EPT_MISCONFIG: rc = hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient); break;
7088 case VMX_EXIT_EPT_VIOLATION: rc = hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient); break;
7089 case VMX_EXIT_IO_INSTR: rc = hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient); break;
7090 case VMX_EXIT_CPUID: rc = hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient); break;
7091 case VMX_EXIT_RDTSC: rc = hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient); break;
7092 case VMX_EXIT_RDTSCP: rc = hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient); break;
7093 case VMX_EXIT_APIC_ACCESS: rc = hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient); break;
7094 case VMX_EXIT_XCPT_NMI: rc = hmR0VmxExitXcptNmi(pVCpu, pMixedCtx, pVmxTransient); break;
7095 case VMX_EXIT_MOV_CRX: rc = hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient); break;
7096 case VMX_EXIT_EXT_INT: rc = hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient); break;
7097 case VMX_EXIT_INT_WINDOW: rc = hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient); break;
7098 case VMX_EXIT_MWAIT: rc = hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient); break;
7099 case VMX_EXIT_MONITOR: rc = hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient); break;
7100 case VMX_EXIT_TASK_SWITCH: rc = hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient); break;
7101 case VMX_EXIT_PREEMPT_TIMER: rc = hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient); break;
7102 case VMX_EXIT_RDMSR: rc = hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient); break;
7103 case VMX_EXIT_WRMSR: rc = hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient); break;
7104 case VMX_EXIT_MOV_DRX: rc = hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient); break;
7105 case VMX_EXIT_TPR_BELOW_THRESHOLD: rc = hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient); break;
7106 case VMX_EXIT_HLT: rc = hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient); break;
7107 case VMX_EXIT_INVD: rc = hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient); break;
7108 case VMX_EXIT_INVLPG: rc = hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient); break;
7109 case VMX_EXIT_RSM: rc = hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient); break;
7110 case VMX_EXIT_MTF: rc = hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient); break;
7111 case VMX_EXIT_PAUSE: rc = hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient); break;
7112 case VMX_EXIT_XDTR_ACCESS: rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); break;
7113 case VMX_EXIT_TR_ACCESS: rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); break;
7114 case VMX_EXIT_WBINVD: rc = hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient); break;
7115 case VMX_EXIT_XSETBV: rc = hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient); break;
7116 case VMX_EXIT_RDRAND: rc = hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient); break;
7117 case VMX_EXIT_INVPCID: rc = hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient); break;
7118 case VMX_EXIT_GETSEC: rc = hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient); break;
7119 case VMX_EXIT_RDPMC: rc = hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient); break;
7120
7121 case VMX_EXIT_TRIPLE_FAULT: rc = hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient); break;
7122 case VMX_EXIT_NMI_WINDOW: rc = hmR0VmxExitNmiWindow(pVCpu, pMixedCtx, pVmxTransient); break;
7123 case VMX_EXIT_INIT_SIGNAL: rc = hmR0VmxExitInitSignal(pVCpu, pMixedCtx, pVmxTransient); break;
7124 case VMX_EXIT_SIPI: rc = hmR0VmxExitSipi(pVCpu, pMixedCtx, pVmxTransient); break;
7125 case VMX_EXIT_IO_SMI: rc = hmR0VmxExitIoSmi(pVCpu, pMixedCtx, pVmxTransient); break;
7126 case VMX_EXIT_SMI: rc = hmR0VmxExitSmi(pVCpu, pMixedCtx, pVmxTransient); break;
7127 case VMX_EXIT_ERR_MSR_LOAD: rc = hmR0VmxExitErrMsrLoad(pVCpu, pMixedCtx, pVmxTransient); break;
7128 case VMX_EXIT_ERR_INVALID_GUEST_STATE: rc = hmR0VmxExitErrInvalidGuestState(pVCpu, pMixedCtx, pVmxTransient); break;
7129 case VMX_EXIT_ERR_MACHINE_CHECK: rc = hmR0VmxExitErrMachineCheck(pVCpu, pMixedCtx, pVmxTransient); break;
7130
7131 case VMX_EXIT_VMCALL:
7132 case VMX_EXIT_VMCLEAR:
7133 case VMX_EXIT_VMLAUNCH:
7134 case VMX_EXIT_VMPTRLD:
7135 case VMX_EXIT_VMPTRST:
7136 case VMX_EXIT_VMREAD:
7137 case VMX_EXIT_VMRESUME:
7138 case VMX_EXIT_VMWRITE:
7139 case VMX_EXIT_VMXOFF:
7140 case VMX_EXIT_VMXON:
7141 case VMX_EXIT_INVEPT:
7142 case VMX_EXIT_INVVPID:
7143 case VMX_EXIT_VMFUNC:
7144 rc = hmR0VmxExitSetPendingXcptUD(pVCpu, pMixedCtx, pVmxTransient);
7145 break;
7146 default:
7147 rc = hmR0VmxExitErrUndefined(pVCpu, pMixedCtx, pVmxTransient);
7148 break;
7149 }
7150 return rc;
7151}
7152#endif
7153
7154#ifdef DEBUG
7155/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
7156# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
7157 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
7158
7159# define HMVMX_ASSERT_PREEMPT_CPUID() \
7160 do \
7161 { \
7162 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
7163 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
7164 } while (0)
7165
7166# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \
7167 do { \
7168 AssertPtr(pVCpu); \
7169 AssertPtr(pMixedCtx); \
7170 AssertPtr(pVmxTransient); \
7171 Assert(pVmxTransient->fVMEntryFailed == false); \
7172 Assert(ASMIntAreEnabled()); \
7173 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
7174 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
7175 Log4Func(("vcpu[%RU32] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v\n", pVCpu->idCpu)); \
7176 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
7177 if (VMMR0IsLogFlushDisabled(pVCpu)) \
7178 HMVMX_ASSERT_PREEMPT_CPUID(); \
7179 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
7180 } while (0)
7181
7182# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() \
7183 do { \
7184 Log4Func(("\n")); \
7185 } while(0)
7186#else /* Release builds */
7187# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() do { HMVMX_STOP_EXIT_DISPATCH_PROF(); } while(0)
7188# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while(0)
7189#endif
7190
7191
7192/**
7193 * Advances the guest RIP after reading it from the VMCS.
7194 *
7195 * @returns VBox status code.
7196 * @param pVCpu Pointer to the VMCPU.
7197 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7198 * out-of-sync. Make sure to update the required fields
7199 * before using them.
7200 * @param pVmxTransient Pointer to the VMX transient structure.
7201 *
7202 * @remarks No-long-jump zone!!!
7203 */
7204DECLINLINE(int) hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7205{
7206 int rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7207 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
7208 AssertRCReturn(rc, rc);
7209
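    /* Note: the VMCS exit-instruction-length field is only defined for VM-exits caused by executing an
       instruction (CPUID, HLT, I/O, MSR accesses, etc.), so this helper must only be used for such exits. */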
7210 pMixedCtx->rip += pVmxTransient->cbInstr;
7211 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
7212 return rc;
7213}
7214
7215
7216/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7217/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7218/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7219
7220/** @name VM-exit handlers.
7221 * @{
7222 */
7223
7224/**
7225 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7226 */
7227HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7228{
7229 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7230 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
7231 /* 32-bit Windows hosts (4 cores) have trouble with this; it causes higher interrupt latency. */
7232#if HC_ARCH_BITS == 64 && defined(VBOX_WITH_VMMR0_DISABLE_PREEMPTION)
7233 Assert(ASMIntAreEnabled());
7234 return VINF_SUCCESS;
7235#else
7236 return VINF_EM_RAW_INTERRUPT;
7237#endif
7238}
7239
7240
7241/**
7242 * VM-exit handler for exceptions and NMIs (VMX_EXIT_XCPT_NMI).
7243 */
7244HMVMX_EXIT_DECL hmR0VmxExitXcptNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7245{
7246 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7247 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
7248
7249 int rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
7250 AssertRCReturn(rc, rc);
7251
7252 uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntrInfo);
7253 Assert( !(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT)
7254 && uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
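    /* We don't use the acknowledge-interrupt-on-exit control, so external interrupts are never reflected in the
       exit interruption-information here; they are handled separately by hmR0VmxExitExtInt(). */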
7255
7256 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
7257 {
7258 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
7259 return VINF_EM_RAW_INTERRUPT;
7260 }
7261
7262 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
7263 rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
7264 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
7265 {
7266 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
7267 return VINF_SUCCESS;
7268 }
7269 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
7270 {
7271 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
7272 return rc;
7273 }
7274
7275 uint32_t uExitIntrInfo = pVmxTransient->uExitIntrInfo;
7276 uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntrInfo);
7277 switch (uIntrType)
7278 {
7279 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT: /* Software exception. (#BP or #OF) */
7280 Assert(uVector == X86_XCPT_DB || uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
7281 /* no break */
7282 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT:
7283 {
7284 switch (uVector)
7285 {
7286 case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pMixedCtx, pVmxTransient); break;
7287 case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pMixedCtx, pVmxTransient); break;
7288 case X86_XCPT_NM: rc = hmR0VmxExitXcptNM(pVCpu, pMixedCtx, pVmxTransient); break;
7289 case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pMixedCtx, pVmxTransient); break;
7290 case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pMixedCtx, pVmxTransient); break;
7291 case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pMixedCtx, pVmxTransient); break;
7292#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7293 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
7294 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7295 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
7296 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7297 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
7298 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7299 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
7300 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7301 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
7302 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7303#endif
7304 default:
7305 {
7306 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7307 AssertRCReturn(rc, rc);
7308
7309 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
7310 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
7311 {
7312 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
7313 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
7314 rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7315 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
7316 AssertRCReturn(rc, rc);
7317 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntrInfo),
7318 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode,
7319 0 /* GCPtrFaultAddress */);
7320 AssertRCReturn(rc, rc);
7321 }
7322 else
7323 {
7324 AssertMsgFailed(("Unexpected VM-exit caused by exception %#x\n", uVector));
7325 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
7326 }
7327 break;
7328 }
7329 }
7330 break;
7331 }
7332
7333 case VMX_EXIT_INTERRUPTION_INFO_TYPE_DB_XCPT:
7334 default:
7335 {
7336 rc = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_CODE;
7337 AssertMsgFailed(("Unexpected interruption code %#x\n", VMX_EXIT_INTERRUPTION_INFO_TYPE(uExitIntrInfo)));
7338 break;
7339 }
7340 }
7341 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
7342 return rc;
7343}
7344
7345
7346/**
7347 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7348 */
7349HMVMX_EXIT_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7350{
7351 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7352
7353 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7354 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
7355 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
7356 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7357 AssertRCReturn(rc, rc);
7358
7359 /* Deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectEvent() and resume guest execution. */
7360 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
7361 return VINF_SUCCESS;
7362}
7363
7364
7365/**
7366 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7367 */
7368HMVMX_EXIT_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7369{
7370 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7371 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7372 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7373}
7374
7375
7376/**
7377 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7378 */
7379HMVMX_EXIT_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7380{
7381 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7382 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
7383 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7384}
7385
7386
7387/**
7388 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7389 */
7390HMVMX_EXIT_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7391{
7392 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7393 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
7394 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7395}
7396
7397
7398/**
7399 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7400 */
7401HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7402{
7403 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7404 PVM pVM = pVCpu->CTX_SUFF(pVM);
7405 int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7406 if (RT_LIKELY(rc == VINF_SUCCESS))
7407 {
7408 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7409 Assert(pVmxTransient->cbInstr == 2);
7410 }
7411 else
7412 {
7413 AssertMsgFailed(("hmR0VmxExitCpuid: EMInterpretCpuId failed with %Rrc\n", rc));
7414 rc = VERR_EM_INTERPRETER;
7415 }
7416 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
7417 return rc;
7418}
7419
7420
7421/**
7422 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7423 */
7424HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7425{
7426 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7427 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
7428 AssertRCReturn(rc, rc);
7429
7430 if (pMixedCtx->cr4 & X86_CR4_SMXE)
7431 return VINF_EM_RAW_EMULATE_INSTR;
7432
7433 AssertMsgFailed(("hmR0VmxExitGetsec: unexpected VM-exit when CR4.SMXE is 0.\n"));
7434 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7435}
7436
7437
7438/**
7439 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7440 */
7441HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7442{
7443 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7444 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
7445 AssertRCReturn(rc, rc);
7446
7447 PVM pVM = pVCpu->CTX_SUFF(pVM);
7448 rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7449 if (RT_LIKELY(rc == VINF_SUCCESS))
7450 {
7451 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7452 Assert(pVmxTransient->cbInstr == 2);
7453 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
7454 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
7455 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
7456 }
7457 else
7458 {
7459 AssertMsgFailed(("hmR0VmxExitRdtsc: EMInterpretRdtsc failed with %Rrc\n", rc));
7460 rc = VERR_EM_INTERPRETER;
7461 }
7462 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
7463 return rc;
7464}
7465
7466
7467/**
7468 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7469 */
7470HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7471{
7472 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7473 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
7474 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); /* For MSR_K8_TSC_AUX */
7475 AssertRCReturn(rc, rc);
7476
7477 PVM pVM = pVCpu->CTX_SUFF(pVM);
7478 rc = EMInterpretRdtscp(pVM, pVCpu, pMixedCtx);
7479 if (RT_LIKELY(rc == VINF_SUCCESS))
7480 {
7481 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7482 Assert(pVmxTransient->cbInstr == 3);
7483 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
7484 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
7485 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
7486 }
7487 else
7488 {
7489 AssertMsgFailed(("hmR0VmxExitRdtscp: EMInterpretRdtscp failed with %Rrc\n", rc));
7490 rc = VERR_EM_INTERPRETER;
7491 }
7492 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
7493 return rc;
7494}
7495
7496
7497/**
7498 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7499 */
7500HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7501{
7502 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7503 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
7504 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); /** @todo review if CR0 is really required by EM. */
7505 AssertRCReturn(rc, rc);
7506
7507 PVM pVM = pVCpu->CTX_SUFF(pVM);
7508 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7509 if (RT_LIKELY(rc == VINF_SUCCESS))
7510 {
7511 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7512 Assert(pVmxTransient->cbInstr == 2);
7513 }
7514 else
7515 {
7516 AssertMsgFailed(("hmR0VmxExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
7517 rc = VERR_EM_INTERPRETER;
7518 }
7519 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
7520 return rc;
7521}
7522
7523
7524/**
7525 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7526 */
7527HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7528{
7529 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7530 PVM pVM = pVCpu->CTX_SUFF(pVM);
7531 Assert(!pVM->hm.s.fNestedPaging);
7532
7533 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
7534 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
7535 AssertRCReturn(rc, rc);
7536
7537 VBOXSTRICTRC rc2 = EMInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), pVmxTransient->uExitQualification);
7538 rc = VBOXSTRICTRC_VAL(rc2);
7539 if (RT_LIKELY(rc == VINF_SUCCESS))
7540 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7541 else
7542 {
7543 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %#RX64 failed with %Rrc\n",
7544 pVmxTransient->uExitQualification, rc));
7545 }
7546 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
7547 return rc;
7548}
7549
7550
7551/**
7552 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7553 */
7554HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7555{
7556 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7557 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7558 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7559 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7560 AssertRCReturn(rc, rc);
7561
7562 PVM pVM = pVCpu->CTX_SUFF(pVM);
7563 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7564 if (RT_LIKELY(rc == VINF_SUCCESS))
7565 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7566 else
7567 {
7568 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
7569 rc = VERR_EM_INTERPRETER;
7570 }
7571 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
7572 return rc;
7573}
7574
7575
7576/**
7577 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7578 */
7579HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7580{
7581 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7582 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7583 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7584 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7585 AssertRCReturn(rc, rc);
7586
7587 PVM pVM = pVCpu->CTX_SUFF(pVM);
7588 VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7589 rc = VBOXSTRICTRC_VAL(rc2);
7590 if (RT_LIKELY( rc == VINF_SUCCESS
7591 || rc == VINF_EM_HALT))
7592 {
7593 int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7594 AssertRCReturn(rc3, rc3);
7595
7596 if ( rc == VINF_EM_HALT
7597 && EMShouldContinueAfterHalt(pVCpu, pMixedCtx))
7598 {
7599 rc = VINF_SUCCESS;
7600 }
7601 }
7602 else
7603 {
7604 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
7605 rc = VERR_EM_INTERPRETER;
7606 }
7607 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
7608 ("hmR0VmxExitMwait: failed, invalid error code %Rrc\n", rc));
7609 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
7610 return rc;
7611}
7612
7613
7614/**
7615 * VM-exit handler for RSM (VMX_EXIT_RSM). Unconditional VM-exit.
7616 */
7617HMVMX_EXIT_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7618{
7619 /*
7620 * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root mode. In theory, we should never
7621 * get this VM-exit. This can happen only if dual-monitor treatment of SMI and VMX is enabled, which can (only?) be done by
7622 * executing VMCALL in VMX root operation. If we get here something funny is going on.
7623 * See Intel spec. "33.15.5 Enabling the Dual-Monitor Treatment".
7624 */
7625 AssertMsgFailed(("Unexpected RSM VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7626 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7627}
7628
7629
7630/**
7631 * VM-exit handler for SMI (VMX_EXIT_SMI). Unconditional VM-exit.
7632 */
7633HMVMX_EXIT_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7634{
7635 /*
7636 * This can only happen if we support dual-monitor treatment of SMI, which can be activated by executing VMCALL in VMX
7637 * root operation. If we get here, something funny is going on.
7638 * See Intel spec. "33.15.6 Activating the Dual-Monitor Treatment" and Intel spec. 25.3 "Other Causes of VM-Exits"
7639 */
7640 AssertMsgFailed(("Unexpected SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7641 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7642}
7643
7644
7645/**
7646 * VM-exit handler for IO SMI (VMX_EXIT_IO_SMI). Unconditional VM-exit.
7647 */
7648HMVMX_EXIT_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7649{
7650 /* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */
7651 AssertMsgFailed(("Unexpected IO SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7652 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7653}
7654
7655
7656/**
7657 * VM-exit handler for SIPI (VMX_EXIT_SIPI). Conditional VM-exit.
7658 */
7659HMVMX_EXIT_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7660{
7661 /*
7662 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used. We currently
7663 * don't make use of it (see hmR0VmxLoadGuestActivityState()) as our guests don't have direct access to the host LAPIC.
7664 * See Intel spec. 25.3 "Other Causes of VM-exits".
7665 */
7666 AssertMsgFailed(("Unexpected SIPI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7667 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7668}
7669
7670
7671/**
7672 * VM-exit handler for INIT signal (VMX_EXIT_INIT_SIGNAL). Unconditional
7673 * VM-exit.
7674 */
7675HMVMX_EXIT_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7676{
7677 /*
7678 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM. See Intel spec. "33.14.1 Default Treatment of
7679 * SMI Delivery" and "29.3 VMX Instructions" for "VMXON". They are -NOT- blocked in VMX non-root operation, so we can potentially
7680 * still get these exits. See Intel spec. "23.8 Restrictions on VMX operation".
7681 */
7682 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7683 return VINF_SUCCESS; /** @todo r=ramshankar: correct? */
7684}
7685
7686
7687/**
7688 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7689 * VM-exit.
7690 */
7691HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7692{
7693 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7694 return VINF_EM_RESET;
7695}
7696
7697
7698/**
7699 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7700 */
7701HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7702{
7703 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7704 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
7705 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
7706 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7707 AssertRCReturn(rc, rc);
7708
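    /* HLT is a single-byte instruction (0xF4); advance RIP past it manually rather than reading the
       exit-instruction length from the VMCS. */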
7709 pMixedCtx->rip++;
7710 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
7711 if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx)) /* Requires eflags. */
7712 rc = VINF_SUCCESS;
7713 else
7714 rc = VINF_EM_HALT;
7715
7716 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
7717 return rc;
7718}
7719
7720
7721/**
7722 * VM-exit handler for instructions that result in a #UD exception delivered to
7723 * the guest.
7724 */
7725HMVMX_EXIT_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7726{
7727 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7728 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
7729 return VINF_SUCCESS;
7730}
7731
7732
7733/**
7734 * VM-exit handler for expiry of the VMX preemption timer.
7735 */
7736HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7737{
7738 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7739
7740 /* If the preemption-timer has expired, reinitialize the preemption timer on next VM-entry. */
7741 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
7742
7743 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
7744 PVM pVM = pVCpu->CTX_SUFF(pVM);
7745 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
7746 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
7747 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
7748}
7749
7750
7751/**
7752 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
7753 */
7754HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7755{
7756 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7757
7758 /* We expose XSETBV to the guest; fall back to the recompiler for emulation. */
7759 /** @todo check if XSETBV is supported by the recompiler. */
7760 return VERR_EM_INTERPRETER;
7761}
7762
7763
7764/**
7765 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
7766 */
7767HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7768{
7769 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7770
7771 /* The guest should not invalidate the host CPU's TLBs; fall back to the recompiler. */
7772 /** @todo implement EMInterpretInvpcid() */
7773 return VERR_EM_INTERPRETER;
7774}
7775
7776
7777/**
7778 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE).
7779 * Error VM-exit.
7780 */
7781HMVMX_EXIT_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7782{
7783 uint32_t uIntrState;
7784 HMVMXHCUINTREG uHCReg;
7785 uint64_t u64Val;
7786 uint32_t u32Val;
7787
7788 int rc = hmR0VmxReadEntryIntrInfoVmcs(pVmxTransient);
7789 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
7790 rc |= hmR0VmxReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
7791 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
7792 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
7793 AssertRCReturn(rc, rc);
7794
7795 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntrInfo));
7796 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
7797 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
7798 Log4(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", uIntrState));
7799
7800 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); AssertRC(rc);
7801 Log4(("VMX_VMCS_GUEST_CR0 %#RX32\n", u32Val));
7802 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
7803 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
7804 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
7805 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
7806 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
7807 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
7808 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
7809 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
7810 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
7811 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
7812
7813 PVM pVM = pVCpu->CTX_SUFF(pVM);
7814 HMDumpRegs(pVM, pVCpu, pMixedCtx);
7815
7816 return VERR_VMX_INVALID_GUEST_STATE;
7817}
7818
7819
7820/**
7821 * VM-exit handler for VM-entry failure due to an MSR-load
7822 * (VMX_EXIT_ERR_MSR_LOAD). Error VM-exit.
7823 */
7824HMVMX_EXIT_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7825{
7826 AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7827 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7828}
7829
7830
7831/**
7832 * VM-exit handler for VM-entry failure due to a machine-check event
7833 * (VMX_EXIT_ERR_MACHINE_CHECK). Error VM-exit.
7834 */
7835HMVMX_EXIT_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7836{
7837 AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7838 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7839}
7840
7841
7842/**
7843 * VM-exit handler for all undefined reasons. Should never ever happen... in
7844 * theory.
7845 */
7846HMVMX_EXIT_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7847{
7848 AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason, pVCpu, pMixedCtx));
7849 return VERR_VMX_UNDEFINED_EXIT_CODE;
7850}
7851
7852
7853/**
7854 * VM-exit handler for XDTR (LGDT, SGDT, LIDT, SIDT) accesses
7855 * (VMX_EXIT_XDTR_ACCESS) and LDT and TR access (LLDT, LTR, SLDT, STR).
7856 * Conditional VM-exit.
7857 */
7858HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7859{
7860 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7861
7862 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT. */
7863 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess);
7864 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT)
7865 return VERR_EM_INTERPRETER;
7866 AssertMsgFailed(("Unexpected XDTR access. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7867 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7868}
7869
7870
7871/**
7872 * VM-exit handler for RDRAND (VMX_EXIT_RDRAND). Conditional VM-exit.
7873 */
7874HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7875{
7876 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7877
7878 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT. */
7879 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdrand);
7880 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT)
7881 return VERR_EM_INTERPRETER;
7882 AssertMsgFailed(("Unexpected RDRAND exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7883 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7884}
7885
7886
7887/**
7888 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
7889 */
7890HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7891{
7892 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7893
7894 /* EMInterpretRdmsr() requires CR0, Eflags and SS segment register. */
7895 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7896 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7897 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7898 AssertRCReturn(rc, rc);
7899
7900 PVM pVM = pVCpu->CTX_SUFF(pVM);
7901 rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7902 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER,
7903 ("hmR0VmxExitRdmsr: failed, invalid error code %Rrc\n", rc));
7904 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
7905
7906 if (RT_LIKELY(rc == VINF_SUCCESS))
7907 {
7908 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7909 Assert(pVmxTransient->cbInstr == 2);
7910 }
7911 return rc;
7912}
7913
7914
7915/**
7916 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
7917 */
7918HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7919{
7920 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
7921 PVM pVM = pVCpu->CTX_SUFF(pVM);
7922 int rc = VINF_SUCCESS;
7923
7924 /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. */
7925 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7926 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7927 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7928 AssertRCReturn(rc, rc);
7929 Log4(("ecx=%#RX32\n", pMixedCtx->ecx));
7930
7931 rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7932 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0VmxExitWrmsr: failed, invalid error code %Rrc\n", rc));
7933 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
7934
7935 if (RT_LIKELY(rc == VINF_SUCCESS))
7936 {
7937 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7938
7939 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
7940 if ( pMixedCtx->ecx >= MSR_IA32_X2APIC_START
7941 && pMixedCtx->ecx <= MSR_IA32_X2APIC_END)
7942 {
7943 /* We've already saved the APIC-related guest state (TPR) in hmR0VmxPostRunGuest(). When full APIC register
7944 * virtualization is implemented, we'll have to make sure the APIC state is saved from the VMCS before
7945 * EMInterpretWrmsr() changes it. */
7946 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
7947 }
7948 else if (pMixedCtx->ecx == MSR_K6_EFER) /* EFER is the only MSR we auto-load but don't allow write-passthrough. */
7949 {
7950 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
7951 AssertRCReturn(rc, rc);
7952 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_AUTO_MSRS;
7953 }
7954 else if (pMixedCtx->ecx == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
7955 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
7956
7957 /* Update MSRs that are part of the VMCS when MSR-bitmaps are not supported. */
7958 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
7959 {
7960 switch (pMixedCtx->ecx)
7961 {
7962 case MSR_IA32_SYSENTER_CS: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_CS_MSR; break;
7963 case MSR_IA32_SYSENTER_EIP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_EIP_MSR; break;
7964 case MSR_IA32_SYSENTER_ESP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_ESP_MSR; break;
7965 case MSR_K8_FS_BASE: /* no break */
7966 case MSR_K8_GS_BASE: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS; break;
7967 case MSR_K8_KERNEL_GS_BASE: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_AUTO_MSRS; break;
7968 }
7969 }
7970#ifdef VBOX_STRICT
7971 else
7972 {
7973 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
7974 switch (pMixedCtx->ecx)
7975 {
7976 case MSR_IA32_SYSENTER_CS:
7977 case MSR_IA32_SYSENTER_EIP:
7978 case MSR_IA32_SYSENTER_ESP:
7979 case MSR_K8_FS_BASE:
7980 case MSR_K8_GS_BASE:
7981 {
7982 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
7983 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7984 }
7985
7986 case MSR_K8_LSTAR:
7987 case MSR_K6_STAR:
7988 case MSR_K8_SF_MASK:
7989 case MSR_K8_TSC_AUX:
7990 case MSR_K8_KERNEL_GS_BASE:
7991 {
7992 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
7993 pMixedCtx->ecx));
7994 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7995 }
7996 }
7997 }
7998#endif /* VBOX_STRICT */
7999 }
8000 return rc;
8001}
8002
8003
8004/**
8005 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8006 */
8007HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8008{
8009 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8010
8011 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT. */
8012 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);
8013 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT)
8014 return VERR_EM_INTERPRETER;
8015 AssertMsgFailed(("Unexpected PAUSE exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
8016 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8017}
8018
8019
8020/**
8021 * VM-exit handler for when the TPR value is lowered below the specified
8022 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8023 */
8024HMVMX_EXIT_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8025{
8026 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8027 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW);
8028
8029 /*
8030 * The TPR has already been updated, see hmR0VmxPostRunGuest(). RIP is also updated as part of the VM-exit by VT-x. Update
8031 * the threshold in the VMCS, deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectEvent() and
8032 * resume guest execution.
8033 */
8034 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
8035 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
8036 return VINF_SUCCESS;
8037}
8038
8039
8040/**
8041 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8042 * VM-exit.
8043 *
8044 * @retval VINF_SUCCESS when guest execution can continue.
8045 * @retval VINF_PGM_CHANGE_MODE when shadow paging mode changed, back to ring-3.
8046 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8047 * @retval VERR_EM_INTERPRETER when something unexpected happened, fallback to
8048 * recompiler.
8049 */
8050HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8051{
8052 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8053 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
8054 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8055 AssertRCReturn(rc, rc);
8056
8057 const RTGCUINTPTR uExitQualification = pVmxTransient->uExitQualification;
8058 const uint32_t uAccessType = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification);
8059 PVM pVM = pVCpu->CTX_SUFF(pVM);
8060 switch (uAccessType)
8061 {
8062 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE: /* MOV to CRx */
8063 {
8064#if 0
8065 /* EMInterpretCRxWrite() references a lot of guest state (EFER, RFLAGS, segment registers, etc.); sync the entire state. */
8066 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8067#else
8068 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
8069 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
8070 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8071#endif
8072 AssertRCReturn(rc, rc);
8073
8074 rc = EMInterpretCRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
8075 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification),
8076 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification));
8077 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
8078
8079 switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification))
8080 {
8081 case 0: /* CR0 */
8082 Log4(("CRX CR0 write rc=%d CR0=%#RX64\n", rc, pMixedCtx->cr0));
8083 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
8084 break;
8085 case 2: /* CR2 */
8086 /* Nothing to do here; CR2 is not part of the VMCS. */
8087 break;
8088 case 3: /* CR3 */
8089 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx));
8090 Log4(("CRX CR3 write rc=%d CR3=%#RX64\n", rc, pMixedCtx->cr3));
8091 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;
8092 break;
8093 case 4: /* CR4 */
8094 Log4(("CRX CR4 write rc=%d CR4=%#RX64\n", rc, pMixedCtx->cr4));
8095 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4;
8096 break;
8097 case 8: /* CR8 */
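                    /* A MOV to CR8 only reaches this handler when the TPR shadow is not used; with a TPR
                       shadow VT-x handles CR8 writes without a VM-exit. */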
8098 Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
8099 /* CR8 contains the APIC TPR. Was updated by EMInterpretCRxWrite(). */
8100 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
8101 break;
8102 default:
8103 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)));
8104 break;
8105 }
8106
8107 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
8108 break;
8109 }
8110
8111 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ: /* MOV from CRx */
8112 {
8113 /* EMInterpretCRxRead() requires EFER MSR, CS. */
8114 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8115 AssertRCReturn(rc, rc);
8116 Assert( !pVM->hm.s.fNestedPaging
8117 || !CPUMIsGuestPagingEnabledEx(pMixedCtx)
8118 || VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 3);
8119
8120 /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8121 Assert( VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 8
8122 || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
8123
8124 rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
8125 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification),
8126 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification));
8127 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8128 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
8129 Log4(("CRX CR%d Read access rc=%d\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification), rc));
8130 break;
8131 }
8132
8133 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */
8134 {
8135 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8136 AssertRCReturn(rc, rc);
8137 rc = EMInterpretCLTS(pVM, pVCpu);
8138 AssertRCReturn(rc, rc);
8139 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
8140 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
8141 Log4(("CRX CLTS write rc=%d\n", rc));
8142 break;
8143 }
8144
8145 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
8146 {
8147 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8148 AssertRCReturn(rc, rc);
8149 rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
8150 if (RT_LIKELY(rc == VINF_SUCCESS))
8151 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
8152 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
8153 Log4(("CRX LMSW write rc=%d\n", rc));
8154 break;
8155 }
8156
8157 default:
8158 {
8159 AssertMsgFailed(("Invalid access-type in Mov CRx exit qualification %#x\n", uAccessType));
8160 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
8161 }
8162 }
8163
8164 /* Validate possible error codes. */
8165 Assert(rc == VINF_SUCCESS || rc == VINF_PGM_CHANGE_MODE || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_SYNC_CR3
8166 || rc == VERR_VMX_UNEXPECTED_EXCEPTION);
8167 if (RT_SUCCESS(rc))
8168 {
8169 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8170 AssertRCReturn(rc2, rc2);
8171 }
8172
8173 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
8174 return rc;
8175}
8176
8177
8178/**
8179 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8180 * VM-exit.
8181 */
8182HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8183{
8184 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8185 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
8186
8187 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8188 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8189 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
8190 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* Eflag checks in EMInterpretDisasCurrent(). */
8191 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
8192 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* SELM checks in EMInterpretDisasCurrent(). */
8193 /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8194 AssertRCReturn(rc, rc);
8195
8196 Log4(("CS:RIP=%04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
8197
8198 /* Refer to Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
8199 uint32_t uIOPort = VMX_EXIT_QUALIFICATION_IO_PORT(pVmxTransient->uExitQualification);
8200 uint32_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(pVmxTransient->uExitQualification);
8201 bool fIOWrite = (VMX_EXIT_QUALIFICATION_IO_DIRECTION(pVmxTransient->uExitQualification)
8202 == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT);
8203 bool fIOString = (VMX_EXIT_QUALIFICATION_IO_STRING(pVmxTransient->uExitQualification) == 1);
8204 Assert(uIOWidth == 0 || uIOWidth == 1 || uIOWidth == 3);
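    /* The exit-qualification width field encodes the access size minus one: 0 = 1 byte, 1 = 2 bytes,
       3 = 4 bytes; the value 2 is not used, hence the zero entries at index 2 in the lookup arrays below. */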
8205
8206 /* I/O operation lookup arrays. */
8207 static const uint32_t s_aIOSize[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses. */
8208 static const uint32_t s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving the result (in AL/AX/EAX). */
8209
8210 const uint32_t cbSize = s_aIOSize[uIOWidth];
8211 const uint32_t cbInstr = pVmxTransient->cbInstr;
8212 PVM pVM = pVCpu->CTX_SUFF(pVM);
8213 if (fIOString)
8214 {
8215 /* INS/OUTS - I/O String instruction. */
8216 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
8217 /** @todo For now, manually disassemble; later, optimize this by getting the fields from
8218 * the VMCS. VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR contains the flat pointer
8219 * operand of the instruction. VMX_VMCS32_RO_EXIT_INSTR_INFO contains
8220 * segment prefix info. */
8221 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
8222 if (RT_SUCCESS(rc))
8223 {
8224 if (fIOWrite)
8225 {
8226 VBOXSTRICTRC rc2 = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
8227 (DISCPUMODE)pDis->uAddrMode, cbSize);
8228 rc = VBOXSTRICTRC_VAL(rc2);
8229 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
8230 }
8231 else
8232 {
8233 VBOXSTRICTRC rc2 = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
8234 (DISCPUMODE)pDis->uAddrMode, cbSize);
8235 rc = VBOXSTRICTRC_VAL(rc2);
8236 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
8237 }
8238 }
8239 else
8240 {
8241 AssertMsg(rc == VERR_EM_INTERPRETER, ("rc=%Rrc RIP %#RX64\n", rc, pMixedCtx->rip));
8242 rc = VINF_EM_RAW_EMULATE_INSTR;
8243 }
8244 }
8245 else
8246 {
8247 /* IN/OUT - I/O instruction. */
8248 const uint32_t uAndVal = s_aIOOpAnd[uIOWidth];
8249 Assert(!VMX_EXIT_QUALIFICATION_IO_REP(pVmxTransient->uExitQualification));
8250 if (fIOWrite)
8251 {
8252 VBOXSTRICTRC rc2 = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbSize);
8253 rc = VBOXSTRICTRC_VAL(rc2);
8254 if (rc == VINF_IOM_R3_IOPORT_WRITE)
8255 HMR0SavePendingIOPortWrite(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbSize);
8256 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
8257 }
8258 else
8259 {
8260 uint32_t u32Result = 0;
8261 VBOXSTRICTRC rc2 = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbSize);
8262 rc = VBOXSTRICTRC_VAL(rc2);
8263 if (IOM_SUCCESS(rc))
8264 {
8265 /* Save result of I/O IN instr. in AL/AX/EAX. */
8266 pMixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8267 }
8268 else if (rc == VINF_IOM_R3_IOPORT_READ)
8269 HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbSize);
8270 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
8271 }
8272 }
8273
8274 if (IOM_SUCCESS(rc))
8275 {
8276 pMixedCtx->rip += cbInstr;
8277 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
8278 if (RT_LIKELY(rc == VINF_SUCCESS))
8279 {
8280 rc = hmR0VmxSaveGuestDebugRegs(pVCpu, pMixedCtx); /* For DR7. */
8281 AssertRCReturn(rc, rc);
8282
8283 /* If any IO breakpoints are armed, then we should check if a debug trap needs to be generated. */
8284 if (pMixedCtx->dr[7] & X86_DR7_ENABLED_MASK)
8285 {
8286 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
8287 for (unsigned i = 0; i < 4; i++)
8288 {
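                    /* Breakpoint i fires when it is enabled (local or global enable bit in DR7), is configured
                       as an I/O breakpoint (R/W field == I/O), and the accessed port falls within [DRi, DRi + length). */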
8289 uint32_t uBPLen = s_aIOSize[X86_DR7_GET_LEN(pMixedCtx->dr[7], i)];
8290 if ( ( uIOPort >= pMixedCtx->dr[i]
8291 && uIOPort < pMixedCtx->dr[i] + uBPLen)
8292 && (pMixedCtx->dr[7] & (X86_DR7_L(i) | X86_DR7_G(i)))
8293 && (pMixedCtx->dr[7] & X86_DR7_RW(i, X86_DR7_RW_IO)) == X86_DR7_RW(i, X86_DR7_RW_IO))
8294 {
8295 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8296 uint64_t uDR6 = ASMGetDR6();
8297
8298 /* Clear all breakpoint status flags and set the one we just hit. */
8299 uDR6 &= ~(X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3);
8300 uDR6 |= (uint64_t)RT_BIT(i);
8301
8302 /*
8303 * Note: AMD64 Architecture Programmer's Manual 13.1:
8304 * Bits 15:13 of the DR6 register are never cleared by the processor and must
8305 * be cleared by software after the contents have been read.
8306 */
8307 ASMSetDR6(uDR6);
8308
8309 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
8310 pMixedCtx->dr[7] &= ~X86_DR7_GD;
8311
8312 /* Paranoia. */
8313 pMixedCtx->dr[7] &= 0xffffffff; /* Upper 32 bits MBZ. */
8314 pMixedCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* MBZ. */
8315 pMixedCtx->dr[7] |= 0x400; /* MB1. */
8316
8317 /* Resync DR7 */
8318 /** @todo probably cheaper to just reload DR7, nothing else needs changing. */
8319 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
8320
8321 /* Set #DB to be injected into the VM and continue guest execution. */
8322 hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
8323 break;
8324 }
8325 }
8326 }
8327 }
8328 }
8329
8330#ifdef DEBUG
8331 if (rc == VINF_IOM_R3_IOPORT_READ)
8332 Assert(!fIOWrite);
8333 else if (rc == VINF_IOM_R3_IOPORT_WRITE)
8334 Assert(fIOWrite);
8335 else
8336 {
8337 AssertMsg( RT_FAILURE(rc)
8338 || rc == VINF_SUCCESS
8339 || rc == VINF_EM_RAW_EMULATE_INSTR
8340 || rc == VINF_EM_RAW_GUEST_TRAP
8341 || rc == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", rc));
8342 }
8343#endif
8344
8345 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
8346 return rc;
8347}
8348
8349
8350/**
8351 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8352 * VM-exit.
8353 */
8354HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8355{
8356 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8357
8358 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8359 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8360 AssertRCReturn(rc, rc);
8361 if (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
8362 {
8363 rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
8364 AssertRCReturn(rc, rc);
8365 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
8366 {
8367 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
8368
8369 /* Software interrupts and exceptions will be regenerated when the recompiler restarts the instruction. */
8370 if ( uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
8371 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
8372 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
8373 {
8374 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
8375 bool fErrorCodeValid = !!VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo);
8376
8377 /* Save it as a pending event and it'll be converted to a TRPM event on the way out to ring-3. */
8378 Assert(!pVCpu->hm.s.Event.fPending);
8379 pVCpu->hm.s.Event.fPending = true;
8380 pVCpu->hm.s.Event.u64IntrInfo = pVmxTransient->uIdtVectoringInfo;
8381 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
8382 AssertRCReturn(rc, rc);
8383 if (fErrorCodeValid)
8384 pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
8385 else
8386 pVCpu->hm.s.Event.u32ErrCode = 0;
8387 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
8388 && uVector == X86_XCPT_PF)
8389 {
8390 pVCpu->hm.s.Event.GCPtrFaultAddress = pMixedCtx->cr2;
8391 }
8392
8393 Log4(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector));
8394 }
8395 }
8396 }
8397
8398 /** @todo Emulate task switch someday, currently just going back to ring-3 for
8399 * emulation. */
8400 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
8401 return VERR_EM_INTERPRETER;
8402}
8403
8404
8405/**
8406 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
8407 */
8408HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8409{
8410 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8411 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG);
8412 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
8413 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
8414 AssertRCReturn(rc, rc);
8415 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
8416 return VINF_EM_DBG_STOP;
8417}
8418
8419
8420/**
8421 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
8422 */
8423HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8424{
8425 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8426
8427 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
8428 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
8429 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
8430 return VINF_SUCCESS;
8431 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
8432 return rc;
8433
8434#if 0
8435 /** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now
8436 * just sync the whole thing. */
8437 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8438#else
8439 /* Aggressive state sync. for now. */
8440 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
8441 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
8442 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8443#endif
8444 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8445 AssertRCReturn(rc, rc);
8446
8447 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses". */
8448 uint32_t uAccessType = VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
8449 switch (uAccessType)
8450 {
8451 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
8452 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
8453 {
8454 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
8455 && VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) == 0x80)
8456 {
8457 AssertMsgFailed(("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
8458 }
8459
8460 RTGCPHYS GCPhys = pMixedCtx->msrApicBase; /* Always up-to-date, msrApicBase is not part of the VMCS. */
8461 GCPhys &= PAGE_BASE_GC_MASK;
8462 GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
8463 PVM pVM = pVCpu->CTX_SUFF(pVM);
8464 Log4(("ApicAccess uAccessType=%#x GCPhys=%#RGv Off=%#x\n", uAccessType, GCPhys,
8465 VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
8466
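            /* Forward the access to IOM as an MMIO access to the APIC-access page: reads pass error code 0,
               writes pass X86_TRAP_PF_RW. */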
8467 VBOXSTRICTRC rc2 = IOMMMIOPhysHandler(pVM, pVCpu,
8468 (uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ) ? 0 : X86_TRAP_PF_RW,
8469 CPUMCTX2CORE(pMixedCtx), GCPhys);
8470 rc = VBOXSTRICTRC_VAL(rc2);
8471 Log4(("ApicAccess rc=%d\n", rc));
8472 if ( rc == VINF_SUCCESS
8473 || rc == VERR_PAGE_TABLE_NOT_PRESENT
8474 || rc == VERR_PAGE_NOT_PRESENT)
8475 {
8476 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8477 | HM_CHANGED_VMX_GUEST_APIC_STATE;
8478 rc = VINF_SUCCESS;
8479 }
8480 break;
8481 }
8482
8483 default:
8484 Log4(("ApicAccess uAccessType=%#x\n", uAccessType));
8485 rc = VINF_EM_RAW_EMULATE_INSTR;
8486 break;
8487 }
8488
8489 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
8490 return rc;
8491}
8492
8493
8494/**
8495 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
8496 * VM-exit.
8497 */
8498HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8499{
8500 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8501
8502 /* We should -not- get this VM-exit if the guest is debugging. */
8503 if (CPUMIsGuestDebugStateActive(pVCpu))
8504 {
8505 AssertMsgFailed(("Unexpected MOV DRx exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
8506 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8507 }
8508
8509 int rc = VERR_INTERNAL_ERROR_5;
8510 if ( !DBGFIsStepping(pVCpu)
8511 && !CPUMIsHyperDebugStateActive(pVCpu))
8512 {
8513 /* Don't intercept MOV DRx. */
8514 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
8515 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
8516 AssertRCReturn(rc, rc);
8517
8518 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
8519 PVM pVM = pVCpu->CTX_SUFF(pVM);
8520 rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
8521 AssertRC(rc);
8522 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8523
8524#ifdef VBOX_WITH_STATISTICS
8525 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8526 AssertRCReturn(rc, rc);
8527 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
8528 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
8529 else
8530 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
8531#endif
8532 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
8533 return VINF_SUCCESS;
8534 }
8535
8536 /*
8537 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER, CS. EFER is always up-to-date, see
8538 * hmR0VmxSaveGuestAutoLoadStoreMsrs(). Update only the segment registers from the CPU.
8539 */
8540 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8541 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8542 AssertRCReturn(rc, rc);
8543
8544 PVM pVM = pVCpu->CTX_SUFF(pVM);
8545 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
8546 {
8547 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
8548 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification),
8549 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification));
8550 if (RT_SUCCESS(rc))
8551 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
8552 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
8553 }
8554 else
8555 {
8556 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
8557 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification),
8558 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification));
8559 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
8560 }
8561
8562 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8563 if (RT_SUCCESS(rc))
8564 {
8565 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8566 AssertRCReturn(rc2, rc2);
8567 }
8568 return rc;
8569}
8570
8571
8572/**
8573 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
8574 * Conditional VM-exit.
8575 */
8576HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8577{
8578 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8579 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
8580
8581 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
8582 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
8583 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
8584 return VINF_SUCCESS;
8585 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
8586 return rc;
8587
8588 RTGCPHYS GCPhys = 0;
8589 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
8590
8591#if 0
8592 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
8593#else
8594 /* Aggressive state sync. for now. */
8595 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
8596 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
8597 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8598#endif
8599 AssertRCReturn(rc, rc);
8600
8601 /*
8602 * If we succeed, resume guest execution.
8603 * If we fail in interpreting the instruction because we couldn't get the guest physical address
8604 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
8605 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
8606 * weird case. See @bugref{6043}.
8607 */
8608 PVM pVM = pVCpu->CTX_SUFF(pVM);
8609 VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
8610 rc = VBOXSTRICTRC_VAL(rc2);
8611 Log4(("EPT misconfig at %#RGv RIP=%#RX64 rc=%d\n", GCPhys, pMixedCtx->rip, rc));
8612 if ( rc == VINF_SUCCESS
8613 || rc == VERR_PAGE_TABLE_NOT_PRESENT
8614 || rc == VERR_PAGE_NOT_PRESENT)
8615 {
8616 /* Successfully handled MMIO operation. */
8617 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8618 | HM_CHANGED_VMX_GUEST_APIC_STATE;
8619 rc = VINF_SUCCESS;
8620 }
8621 return rc;
8622}
8623
8624
8625/**
8626 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
8627 * VM-exit.
8628 */
8629HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8630{
8631 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8632 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
8633
8634 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
8635 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
8636 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
8637 return VINF_SUCCESS;
8638 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
8639 return rc;
8640
8641 RTGCPHYS GCPhys = 0;
8642 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
8643 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8644#if 0
8645 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
8646#else
8647 /* Aggressive state sync. for now. */
8648 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
8649 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
8650 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8651#endif
8652 AssertRCReturn(rc, rc);
8653
8654 /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
8655 AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQualification));
8656
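    /* Fold the relevant exit qualification bits into a #PF-style error code for the nested-paging handler:
       instruction fetch -> I/D, data write -> R/W, translation present -> P. */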
8657 RTGCUINT uErrorCode = 0;
8658 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH)
8659 uErrorCode |= X86_TRAP_PF_ID;
8660 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE)
8661 uErrorCode |= X86_TRAP_PF_RW;
8662 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
8663 uErrorCode |= X86_TRAP_PF_P;
8664
8665 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
8666
8667 Log4(("EPT violation %#x at %#RX64 ErrorCode %#x CS:EIP=%04x:%#RX64\n", pVmxTransient->uExitQualification, GCPhys,
8668 uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
8669
8670 /* Handle the pagefault trap for the nested shadow table. */
8671 PVM pVM = pVCpu->CTX_SUFF(pVM);
8672 rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pMixedCtx), GCPhys);
8673 TRPMResetTrap(pVCpu);
8674
8675 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
8676 if ( rc == VINF_SUCCESS
8677 || rc == VERR_PAGE_TABLE_NOT_PRESENT
8678 || rc == VERR_PAGE_NOT_PRESENT)
8679 {
8680 /* Successfully synced our nested page tables. */
8681 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
8682 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS;
8683 return VINF_SUCCESS;
8684 }
8685
8686 Log4(("EPT return to ring-3 rc=%d\n"));
8687 return rc;
8688}
8689
8690/** @} */
8691
8692/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
8693/* -=-=-=-=-=-=-=-=-=- VM-exit Exception Handlers -=-=-=-=-=-=-=-=-=-=- */
8694/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
8695
8696/** @name VM-exit exception handlers.
8697 * @{
8698 */
8699
8700/**
8701 * VM-exit exception handler for #MF (Math Fault: floating point exception).
8702 */
8703static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8704{
8705 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8706 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
8707
8708 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8709 AssertRCReturn(rc, rc);
8710
8711 if (!(pMixedCtx->cr0 & X86_CR0_NE))
8712 {
8713 /* Old-style FPU error reporting needs some extra work. */
8714 /** @todo don't fall back to the recompiler, but do it manually. */
8715 return VERR_EM_INTERPRETER;
8716 }
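    /* CR0.NE is set; reflect the #MF back into the guest so its own exception handler can deal with it. */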
8717 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8718 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
8719 return rc;
8720}
8721
8722
8723/**
8724 * VM-exit exception handler for #BP (Breakpoint exception).
8725 */
8726static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8727{
8728 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8729 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
8730
8731 /** @todo Try optimize this by not saving the entire guest state unless
8732 * really needed. */
8733 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8734 AssertRCReturn(rc, rc);
8735
8736 PVM pVM = pVCpu->CTX_SUFF(pVM);
8737 rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
8738 if (rc == VINF_EM_RAW_GUEST_TRAP)
8739 {
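        /* The debugger did not consume the breakpoint; gather the exit details and re-inject #BP into the guest. */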
8740 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8741 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8742 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8743 AssertRCReturn(rc, rc);
8744
8745 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8746 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
8747 }
8748
8749 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
8750 return rc;
8751}
8752
8753
8754/**
8755 * VM-exit exception handler for #DB (Debug exception).
8756 */
8757static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8758{
8759 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8760 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
8761
8762 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8763 AssertRCReturn(rc, rc);
8764
8765 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
8766 uint64_t uDR6 = X86_DR6_INIT_VAL;
8767 uDR6 |= (pVmxTransient->uExitQualification
8768 & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
8769 PVM pVM = pVCpu->CTX_SUFF(pVM);
8770 rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6);
8771 if (rc == VINF_EM_RAW_GUEST_TRAP)
8772 {
8773 /* DR6, DR7.GD and IA32_DEBUGCTL.LBR are not updated yet. See Intel spec. 27.1 "Architectural State before a VM-Exit". */
8774 pMixedCtx->dr[6] = uDR6;
8775
8776 if (CPUMIsGuestDebugStateActive(pVCpu))
8777 ASMSetDR6(pMixedCtx->dr[6]);
8778
8779 rc = hmR0VmxSaveGuestDebugRegs(pVCpu, pMixedCtx);
8780
8781 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
8782 pMixedCtx->dr[7] &= ~X86_DR7_GD;
8783
8784 /* Paranoia. */
8785 pMixedCtx->dr[7] &= 0xffffffff; /* Upper 32 bits MBZ. */
8786 pMixedCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* MBZ. */
8787 pMixedCtx->dr[7] |= 0x400; /* MB1. */
8788
8789 rc |= VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pMixedCtx->dr[7]);
8790 AssertRCReturn(rc, rc);
8791
8792 int rc2 = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8793 rc2 |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8794 rc2 |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8795 AssertRCReturn(rc2, rc2);
8796 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8797 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
8798 rc = VINF_SUCCESS;
8799 }
8800
8801 return rc;
8802}
8803
8804
8805/**
8806 * VM-exit exception handler for #NM (Device-not-available exception: floating
8807 * point exception).
8808 */
8809static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8810{
8811 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8812
8813#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
8814 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
8815#endif
8816
8817 /* We require CR0 and EFER. EFER is always up-to-date. */
8818 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8819 AssertRCReturn(rc, rc);
8820
8821 /* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
8822 PVM pVM = pVCpu->CTX_SUFF(pVM);
8823 rc = CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
8824 if (rc == VINF_SUCCESS)
8825 {
8826 Assert(CPUMIsGuestFPUStateActive(pVCpu));
8827 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
8828 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
8829 return VINF_SUCCESS;
8830 }
8831
8832 /* Forward #NM to the guest. */
8833 Assert(rc == VINF_EM_RAW_GUEST_TRAP);
8834 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8835 AssertRCReturn(rc, rc);
8836 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8837 pVmxTransient->cbInstr, 0 /* error code */, 0 /* GCPtrFaultAddress */);
8838 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
8839 return rc;
8840}
8841
8842
8843/**
8844 * VM-exit exception handler for #GP (General-protection exception).
8845 *
8846 * @remarks Requires pVmxTransient->uExitIntrInfo to be up-to-date.
8847 */
8848static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8849{
8850 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8851 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
8852
8853 int rc = VERR_INTERNAL_ERROR_5;
8854 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
8855 {
8856#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
8857 /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
8858 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8859 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8860 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8861 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8862 AssertRCReturn(rc, rc);
8863 Log4(("#GP Gst: RIP %#RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u\n", pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode,
8864 pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu)));
8865 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8866 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
8867 return rc;
8868#else
8869 /* We don't intercept #GP. */
8870 AssertMsgFailed(("Unexpected VM-exit caused by #GP exception\n"));
8871 return VERR_VMX_UNEXPECTED_EXCEPTION;
8872#endif
8873 }
8874
8875 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
8876 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
8877
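    /* Real-on-v86 mode without unrestricted guest execution: #GP is intercepted so that privileged real-mode
       instructions (CLI, STI, HLT, POPF, PUSHF, IRET, INT, ...) can be emulated here. */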
8878 /* EMInterpretDisasCurrent() requires a lot of the state, save the entire state. */
8879 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8880 AssertRCReturn(rc, rc);
8881
8882 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
8883 uint32_t cbOp = 0;
8884 PVM pVM = pVCpu->CTX_SUFF(pVM);
8885 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
8886 if (RT_SUCCESS(rc))
8887 {
8888 rc = VINF_SUCCESS;
8889 Assert(cbOp == pDis->cbInstr);
8890 Log4(("#GP Disas OpCode=%u CS:EIP %04x:%#RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
8891 switch (pDis->pCurInstr->uOpcode)
8892 {
8893 case OP_CLI:
8894 {
8895 pMixedCtx->eflags.Bits.u1IF = 0;
8896 pMixedCtx->rip += pDis->cbInstr;
8897 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
8898 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
8899 break;
8900 }
8901
8902 case OP_STI:
8903 {
8904 pMixedCtx->eflags.Bits.u1IF = 1;
8905 pMixedCtx->rip += pDis->cbInstr;
8906 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
8907 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
8908 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
8909 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
8910 break;
8911 }
8912
8913 case OP_HLT:
8914 {
8915 rc = VINF_EM_HALT;
8916 pMixedCtx->rip += pDis->cbInstr;
8917 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
8918 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
8919 break;
8920 }
8921
8922 case OP_POPF:
8923 {
8924 Log4(("POPF CS:RIP %04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
8925 uint32_t cbParm = 0;
8926 uint32_t uMask = 0;
8927 if (pDis->fPrefix & DISPREFIX_OPSIZE)
8928 {
8929 cbParm = 4;
8930 uMask = 0xffffffff;
8931 }
8932 else
8933 {
8934 cbParm = 2;
8935 uMask = 0xffff;
8936 }
8937
8938 /* Get the stack pointer & pop the contents of the stack onto EFlags. */
8939 RTGCPTR GCPtrStack = 0;
8940 X86EFLAGS uEflags;
8941 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
8942 &GCPtrStack);
8943 if (RT_SUCCESS(rc))
8944 {
8945 Assert(sizeof(uEflags.u32) >= cbParm);
8946 uEflags.u32 = 0;
8947 rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &uEflags.u32, cbParm);
8948 }
8949 if (RT_FAILURE(rc))
8950 {
8951 rc = VERR_EM_INTERPRETER;
8952 break;
8953 }
8954 Log4(("POPF %x -> %#RX64 mask=%x RIP=%#RX64\n", uEflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
8955 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
8956 | (uEflags.u32 & X86_EFL_POPF_BITS & uMask);
8957 /* The RF bit is always cleared by POPF; see Intel Instruction reference for POPF. */
8958 pMixedCtx->eflags.Bits.u1RF = 0;
8959 pMixedCtx->esp += cbParm;
8960 pMixedCtx->esp &= uMask;
8961 pMixedCtx->rip += pDis->cbInstr;
8962 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS;
8963 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
8964 break;
8965 }
8966
8967 case OP_PUSHF:
8968 {
8969 uint32_t cbParm = 0;
8970 uint32_t uMask = 0;
8971 if (pDis->fPrefix & DISPREFIX_OPSIZE)
8972 {
8973 cbParm = 4;
8974 uMask = 0xffffffff;
8975 }
8976 else
8977 {
8978 cbParm = 2;
8979 uMask = 0xffff;
8980 }
8981
8982 /* Get the stack pointer & push the contents of eflags onto the stack. */
8983 RTGCPTR GCPtrStack = 0;
8984 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), (pMixedCtx->esp - cbParm) & uMask,
8985 SELMTOFLAT_FLAGS_CPL0, &GCPtrStack);
8986 if (RT_FAILURE(rc))
8987 {
8988 rc = VERR_EM_INTERPRETER;
8989 break;
8990 }
8991 X86EFLAGS uEflags;
8992 uEflags = pMixedCtx->eflags;
8993 /* The RF & VM bits are cleared on image stored on stack; see Intel Instruction reference for PUSHF. */
8994 uEflags.Bits.u1RF = 0;
8995 uEflags.Bits.u1VM = 0;
8996
8997 rc = PGMPhysWrite(pVM, (RTGCPHYS)GCPtrStack, &uEflags.u, cbParm);
8998 if (RT_FAILURE(rc))
8999 {
9000 rc = VERR_EM_INTERPRETER;
9001 break;
9002 }
9003 Log4(("PUSHF %x -> %#RGv\n", uEflags.u, GCPtrStack));
9004 pMixedCtx->esp -= cbParm;
9005 pMixedCtx->esp &= uMask;
9006 pMixedCtx->rip += pDis->cbInstr;
9007 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP;
9008 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
9009 break;
9010 }
9011
9012 case OP_IRET:
9013 {
9014 /** @todo Handle 32-bit operand sizes and check stack limits. See Intel
9015 * instruction reference. */
9016 RTGCPTR GCPtrStack = 0;
9017 uint32_t uMask = 0xffff;
9018 uint16_t aIretFrame[3];
9019 if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE))
9020 {
9021 rc = VERR_EM_INTERPRETER;
9022 break;
9023 }
9024 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
9025 &GCPtrStack);
9026 if (RT_SUCCESS(rc))
9027 rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &aIretFrame[0], sizeof(aIretFrame));
9028 if (RT_FAILURE(rc))
9029 {
9030 rc = VERR_EM_INTERPRETER;
9031 break;
9032 }
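                /* Real-mode IRET frame as read from the stack: aIretFrame[0]=IP, [1]=CS, [2]=FLAGS. */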
9033 pMixedCtx->eip = 0;
9034 pMixedCtx->ip = aIretFrame[0];
9035 pMixedCtx->cs.Sel = aIretFrame[1];
9036 pMixedCtx->cs.ValidSel = aIretFrame[1];
9037 pMixedCtx->cs.u64Base = (uint64_t)pMixedCtx->cs.Sel << 4;
9038 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
9039 | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
9040 pMixedCtx->sp += sizeof(aIretFrame);
9041 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_SEGMENT_REGS | HM_CHANGED_GUEST_RSP
9042 | HM_CHANGED_GUEST_RFLAGS;
9043 Log4(("IRET %#RX32 to %04x:%x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
9044 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
9045 break;
9046 }
9047
9048 case OP_INT:
9049 {
9050 uint16_t uVector = pDis->Param1.uValue & 0xff;
9051 hmR0VmxSetPendingIntN(pVCpu, pMixedCtx, uVector, pDis->cbInstr);
9052 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
9053 break;
9054 }
9055
9056 case OP_INTO:
9057 {
9058 if (pMixedCtx->eflags.Bits.u1OF)
9059 {
9060 hmR0VmxSetPendingXcptOF(pVCpu, pMixedCtx, pDis->cbInstr);
9061 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
9062 }
9063 break;
9064 }
9065
9066 default:
9067 {
9068 VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pMixedCtx), 0 /* pvFault */,
9069 EMCODETYPE_SUPERVISOR);
9070 rc = VBOXSTRICTRC_VAL(rc2);
9071 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
9072 Log4(("#GP rc=%Rrc\n", rc));
9073 break;
9074 }
9075 }
9076 }
9077 else
9078 rc = VERR_EM_INTERPRETER;
9079
9080 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_EM_HALT,
9081 ("#GP Unexpected rc=%Rrc\n", rc));
9082 return rc;
9083}
9084
9085
9086/**
9087 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
9088 * the exception reported in the VMX transient structure back into the VM.
9089 *
9090 * @remarks Requires uExitIntrInfo in the VMX transient structure to be
9091 * up-to-date.
9092 */
9093static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9094{
9095 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
9096
9097 /* Re-inject the exception into the guest. This cannot be a double-fault condition which would have been handled in
9098 hmR0VmxCheckExitDueToEventDelivery(). */
9099 int rc = hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
9100 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9101 AssertRCReturn(rc, rc);
9102 Assert(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);
9103 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
9104 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
9105 return VINF_SUCCESS;
9106}
9107
9108
9109/**
9110 * VM-exit exception handler for #PF (Page-fault exception).
9111 */
9112static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9113{
9114 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
9115 PVM pVM = pVCpu->CTX_SUFF(pVM);
9116 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9117 rc |= hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
9118 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
9119 AssertRCReturn(rc, rc);
9120
9121#if defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) || defined(HMVMX_ALWAYS_TRAP_PF)
9122 if (pVM->hm.s.fNestedPaging)
9123 {
9124 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
9125 if (RT_LIKELY(!pVmxTransient->fVectoringPF))
9126 {
9127 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
9128 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
9129 0 /* cbInstr */, pVmxTransient->uExitIntrErrorCode, pVmxTransient->uExitQualification);
9130 }
9131 else
9132 {
9133 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
9134 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
9135 Log4(("Pending #DF due to vectoring #PF. NP\n"));
9136 }
9137 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
9138 return rc;
9139 }
9140#else
9141 Assert(!pVM->hm.s.fNestedPaging);
9142#endif
9143
9144 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
9145 AssertRCReturn(rc, rc);
9146
9147 Log4(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification,
9148 pMixedCtx->cs.Sel, pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode, pMixedCtx->cr3));
9149
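    /* For #PF VM-exits the exit qualification holds the faulting linear address; hand the fault to PGM together
       with the error code from the exit interruption information. */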
9150 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntrErrorCode);
9151 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntrErrorCode, CPUMCTX2CORE(pMixedCtx),
9152 (RTGCPTR)pVmxTransient->uExitQualification);
9153
9154 Log4(("#PF: rc=%Rrc\n", rc));
9155 if (rc == VINF_SUCCESS)
9156 {
9157 /* Successfully synced shadow pages tables or emulated an MMIO instruction. */
9158 /** @todo this isn't quite right, what if guest does lgdt with some MMIO
9159 * memory? We don't update the whole state here... */
9160 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9161 | HM_CHANGED_VMX_GUEST_APIC_STATE;
9162 TRPMResetTrap(pVCpu);
9163 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
9164 return rc;
9165 }
9166 else if (rc == VINF_EM_RAW_GUEST_TRAP)
9167 {
9168 if (!pVmxTransient->fVectoringPF)
9169 {
9170 /* It's a guest page fault and needs to be reflected to the guest. */
9171 uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
9172 TRPMResetTrap(pVCpu);
9173 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
9174 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
9175 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
9176 0 /* cbInstr */, uGstErrorCode, pVmxTransient->uExitQualification);
9177 }
9178 else
9179 {
9180 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
9181 TRPMResetTrap(pVCpu);
9182 pVCpu->hm.s.Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
9183 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
9184 Log4(("#PF: Pending #DF due to vectoring #PF\n"));
9185 }
9186
9187 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
9188 return VINF_SUCCESS;
9189 }
9190
9191 TRPMResetTrap(pVCpu);
9192 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
9193 return rc;
9194}
9195
9196/** @} */
9197