VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp@52182

Last change on this file was in revision 52182, checked in by vboxsync, 10 years ago

VMM/HMVMXR0: Wrong, EMT preemption doesn't lose VMCS state.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 498.7 KB
1/* $Id: HMVMXR0.cpp 52182 2014-07-25 06:21:21Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2012-2014 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_HM
22#include <iprt/x86.h>
23#include <iprt/asm-amd64-x86.h>
24#include <iprt/thread.h>
25
26#include "HMInternal.h"
27#include <VBox/vmm/vm.h>
28#include "HMVMXR0.h"
29#include <VBox/vmm/pdmapi.h>
30#include <VBox/vmm/dbgf.h>
31#include <VBox/vmm/iem.h>
32#include <VBox/vmm/iom.h>
33#include <VBox/vmm/selm.h>
34#include <VBox/vmm/tm.h>
35#include <VBox/vmm/gim.h>
36#ifdef VBOX_WITH_REM
37# include <VBox/vmm/rem.h>
38#endif
39#ifdef DEBUG_ramshankar
40# define HMVMX_ALWAYS_SAVE_GUEST_RFLAGS
41# define HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE
42# define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
43# define HMVMX_ALWAYS_CHECK_GUEST_STATE
44# define HMVMX_ALWAYS_TRAP_ALL_XCPTS
45# define HMVMX_ALWAYS_TRAP_PF
46# define HMVMX_ALWAYS_SWAP_FPU_STATE
47# define HMVMX_ALWAYS_FLUSH_TLB
48# define HMVMX_ALWAYS_SWAP_EFER
49#endif
50
51
52/*******************************************************************************
53* Defined Constants And Macros *
54*******************************************************************************/
55#if defined(RT_ARCH_AMD64)
56# define HMVMX_IS_64BIT_HOST_MODE() (true)
57typedef RTHCUINTREG HMVMXHCUINTREG;
58#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
59extern "C" uint32_t g_fVMXIs64bitHost;
60# define HMVMX_IS_64BIT_HOST_MODE() (g_fVMXIs64bitHost != 0)
61typedef uint64_t HMVMXHCUINTREG;
62#else
63# define HMVMX_IS_64BIT_HOST_MODE() (false)
64typedef RTHCUINTREG HMVMXHCUINTREG;
65#endif
66
67/** Use the function table. */
68#define HMVMX_USE_FUNCTION_TABLE
69
70/** Determine which tagged-TLB flush handler to use. */
71#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
72#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
73#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
74#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
75
76/** @name Updated-guest-state flags.
77 * @{ */
78#define HMVMX_UPDATED_GUEST_RIP RT_BIT(0)
79#define HMVMX_UPDATED_GUEST_RSP RT_BIT(1)
80#define HMVMX_UPDATED_GUEST_RFLAGS RT_BIT(2)
81#define HMVMX_UPDATED_GUEST_CR0 RT_BIT(3)
82#define HMVMX_UPDATED_GUEST_CR3 RT_BIT(4)
83#define HMVMX_UPDATED_GUEST_CR4 RT_BIT(5)
84#define HMVMX_UPDATED_GUEST_GDTR RT_BIT(6)
85#define HMVMX_UPDATED_GUEST_IDTR RT_BIT(7)
86#define HMVMX_UPDATED_GUEST_LDTR RT_BIT(8)
87#define HMVMX_UPDATED_GUEST_TR RT_BIT(9)
88#define HMVMX_UPDATED_GUEST_SEGMENT_REGS RT_BIT(10)
89#define HMVMX_UPDATED_GUEST_DEBUG RT_BIT(11)
90#define HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR RT_BIT(12)
91#define HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR RT_BIT(13)
92#define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR RT_BIT(14)
93#define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS RT_BIT(15)
94#define HMVMX_UPDATED_GUEST_LAZY_MSRS RT_BIT(16)
95#define HMVMX_UPDATED_GUEST_ACTIVITY_STATE RT_BIT(17)
96#define HMVMX_UPDATED_GUEST_INTR_STATE RT_BIT(18)
97#define HMVMX_UPDATED_GUEST_APIC_STATE RT_BIT(19)
98#define HMVMX_UPDATED_GUEST_ALL ( HMVMX_UPDATED_GUEST_RIP \
99 | HMVMX_UPDATED_GUEST_RSP \
100 | HMVMX_UPDATED_GUEST_RFLAGS \
101 | HMVMX_UPDATED_GUEST_CR0 \
102 | HMVMX_UPDATED_GUEST_CR3 \
103 | HMVMX_UPDATED_GUEST_CR4 \
104 | HMVMX_UPDATED_GUEST_GDTR \
105 | HMVMX_UPDATED_GUEST_IDTR \
106 | HMVMX_UPDATED_GUEST_LDTR \
107 | HMVMX_UPDATED_GUEST_TR \
108 | HMVMX_UPDATED_GUEST_SEGMENT_REGS \
109 | HMVMX_UPDATED_GUEST_DEBUG \
110 | HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR \
111 | HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR \
112 | HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR \
113 | HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS \
114 | HMVMX_UPDATED_GUEST_LAZY_MSRS \
115 | HMVMX_UPDATED_GUEST_ACTIVITY_STATE \
116 | HMVMX_UPDATED_GUEST_INTR_STATE \
117 | HMVMX_UPDATED_GUEST_APIC_STATE)
118/** @} */
119
120/** @name
121 * Flags to skip redundant reads of some common VMCS fields that are not part of
122 * the guest-CPU state but are in the transient structure.
123 * @{ */
124#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO RT_BIT(0)
125#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE RT_BIT(1)
126#define HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION RT_BIT(2)
127#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN RT_BIT(3)
128#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO RT_BIT(4)
129#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE RT_BIT(5)
130#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO RT_BIT(6)
131/** @} */
132
133/** @name
134 * States of the VMCS.
135 *
136 * This does not reflect all possible VMCS states but currently only those
137 * needed for maintaining the VMCS consistently even when thread-context hooks
138 * are used. Maybe later this can be extended (i.e. Nested Virtualization).
139 * @{ */
140#define HMVMX_VMCS_STATE_CLEAR RT_BIT(0)
141#define HMVMX_VMCS_STATE_ACTIVE RT_BIT(1)
142#define HMVMX_VMCS_STATE_LAUNCHED RT_BIT(2)
143/** @} */
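/* Rough lifecycle sketch (from the VMX launch-state rules, for orientation only): VMCLEAR puts a
   VMCS into the CLEAR state, VMPTRLD makes it the current/ACTIVE VMCS on a CPU, and the first
   successful VMLAUNCH marks it LAUNCHED; only a LAUNCHED VMCS may subsequently be entered with
   VMRESUME. */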
144
145/**
146 * Exception bitmap mask for real-mode guests (real-on-v86).
147 *
148 * We need to intercept all exceptions manually (except #PF). #NM is also
149 * handled separately, see hmR0VmxLoadSharedCR0(). #PF need not be intercepted
150 * even in real-mode if we have Nested Paging support.
151 */
152#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) | RT_BIT(X86_XCPT_DB) | RT_BIT(X86_XCPT_NMI) \
153 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
154 | RT_BIT(X86_XCPT_UD) /* RT_BIT(X86_XCPT_NM) */ | RT_BIT(X86_XCPT_DF) \
155 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
156 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
157 | RT_BIT(X86_XCPT_MF) | RT_BIT(X86_XCPT_AC) | RT_BIT(X86_XCPT_MC) \
158 | RT_BIT(X86_XCPT_XF))
159
160/**
161 * Exception bitmap mask for all contributory exceptions.
162 *
163 * Page fault is deliberately excluded here as it's conditional as to whether
164 * it's contributory or benign. Page faults are handled separately.
165 */
166#define HMVMX_CONTRIBUTORY_XCPT_MASK ( RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
167 | RT_BIT(X86_XCPT_DE))
168
169/** Maximum VM-instruction error number. */
170#define HMVMX_INSTR_ERROR_MAX 28
171
172/** Profiling macro. */
173#ifdef HM_PROFILE_EXIT_DISPATCH
174# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
175# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
176#else
177# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
178# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
179#endif
180
181/** Assert that preemption is disabled or covered by thread-context hooks. */
182#define HMVMX_ASSERT_PREEMPT_SAFE() Assert( VMMR0ThreadCtxHooksAreRegistered(pVCpu) \
183 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));
184
185/** Assert that we haven't migrated CPUs when thread-context hooks are not
186 * used. */
187#define HMVMX_ASSERT_CPU_SAFE() AssertMsg( VMMR0ThreadCtxHooksAreRegistered(pVCpu) \
188 || pVCpu->hm.s.idEnteredCpu == RTMpCpuId(), \
189 ("Illegal migration! Entered on CPU %u Current %u\n", \
190 pVCpu->hm.s.idEnteredCpu, RTMpCpuId()));
191
192/** Helper macro for VM-exit handlers called unexpectedly. */
193#define HMVMX_RETURN_UNEXPECTED_EXIT() \
194 do { \
195 pVCpu->hm.s.u32HMError = pVmxTransient->uExitReason; \
196 return VERR_VMX_UNEXPECTED_EXIT; \
197 } while (0)
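/* Intended usage sketch: a handler for an exit that should never occur with our VMCS setup simply
   records the exit reason and fails, e.g.

       HMVMX_EXIT_DECL hmR0VmxExitSomethingUnexpected(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
       {
           HMVMX_RETURN_UNEXPECTED_EXIT();
       }

   (handler name invented for illustration). */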
198
199
200/*******************************************************************************
201* Structures and Typedefs *
202*******************************************************************************/
203/**
204 * VMX transient state.
205 *
206 * A state structure for holding miscellaneous information across
207 * VMX non-root operation and restored after the transition.
208 */
209typedef struct VMXTRANSIENT
210{
211 /** The host's rflags/eflags. */
212 RTCCUINTREG uEflags;
213#if HC_ARCH_BITS == 32
214 uint32_t u32Alignment0;
215#endif
216 /** The guest's TPR value used for TPR shadowing. */
217 uint8_t u8GuestTpr;
218 /** Alignment. */
219 uint8_t abAlignment0[7];
220
221 /** The basic VM-exit reason. */
222 uint16_t uExitReason;
223 /** Alignment. */
224 uint16_t u16Alignment0;
225 /** The VM-exit interruption error code. */
226 uint32_t uExitIntErrorCode;
227 /** The VM-exit exit qualification. */
228 uint64_t uExitQualification;
229
230 /** The VM-exit interruption-information field. */
231 uint32_t uExitIntInfo;
232 /** The VM-exit instruction-length field. */
233 uint32_t cbInstr;
234 /** The VM-exit instruction-information field. */
235 union
236 {
237 /** Plain unsigned int representation. */
238 uint32_t u;
239 /** INS and OUTS information. */
240 struct
241 {
242 uint32_t u6Reserved0 : 7;
243 /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
244 uint32_t u3AddrSize : 3;
245 uint32_t u5Reserved1 : 5;
246 /** The segment register (X86_SREG_XXX). */
247 uint32_t iSegReg : 3;
248 uint32_t uReserved2 : 14;
249 } StrIo;
250 } ExitInstrInfo;
251 /** Whether the VM-entry failed or not. */
252 bool fVMEntryFailed;
253 /** Alignment. */
254 uint8_t abAlignment1[3];
255
256 /** The VM-entry interruption-information field. */
257 uint32_t uEntryIntInfo;
258 /** The VM-entry exception error code field. */
259 uint32_t uEntryXcptErrorCode;
260 /** The VM-entry instruction length field. */
261 uint32_t cbEntryInstr;
262
263 /** IDT-vectoring information field. */
264 uint32_t uIdtVectoringInfo;
265 /** IDT-vectoring error code. */
266 uint32_t uIdtVectoringErrorCode;
267
268 /** Mask of currently read VMCS fields; HMVMX_UPDATED_TRANSIENT_*. */
269 uint32_t fVmcsFieldsRead;
270
271 /** Whether the guest FPU was active at the time of VM-exit. */
272 bool fWasGuestFPUStateActive;
273 /** Whether the guest debug state was active at the time of VM-exit. */
274 bool fWasGuestDebugStateActive;
275 /** Whether the hyper debug state was active at the time of VM-exit. */
276 bool fWasHyperDebugStateActive;
277 /** Whether TSC-offsetting should be setup before VM-entry. */
278 bool fUpdateTscOffsettingAndPreemptTimer;
279 /** Whether the VM-exit was caused by a page-fault during delivery of a
280 * contributory exception or a page-fault. */
281 bool fVectoringPF;
282} VMXTRANSIENT;
283AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, sizeof(uint64_t));
284AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntInfo, sizeof(uint64_t));
285AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntInfo, sizeof(uint64_t));
286AssertCompileMemberAlignment(VMXTRANSIENT, fWasGuestFPUStateActive, sizeof(uint64_t));
287AssertCompileMemberSize(VMXTRANSIENT, ExitInstrInfo, sizeof(uint32_t));
288/** Pointer to VMX transient state. */
289typedef VMXTRANSIENT *PVMXTRANSIENT;
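/* For string I/O exits (INS/OUTS) the instruction-information field is interpreted through the
   StrIo view above. A hypothetical consumer (names invented for illustration) might do:

       hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
       uint8_t const iSegReg   = pVmxTransient->ExitInstrInfo.StrIo.iSegReg;            (X86_SREG_XXX)
       uint8_t const cAddrBits = 16 << pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;   (16/32/64)
*/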
290
291
292/**
293 * MSR-bitmap read permissions.
294 */
295typedef enum VMXMSREXITREAD
296{
297 /** Reading this MSR causes a VM-exit. */
298 VMXMSREXIT_INTERCEPT_READ = 0xb,
299 /** Reading this MSR does not cause a VM-exit. */
300 VMXMSREXIT_PASSTHRU_READ
301} VMXMSREXITREAD;
302/** Pointer to MSR-bitmap read permissions. */
303typedef VMXMSREXITREAD* PVMXMSREXITREAD;
304
305/**
306 * MSR-bitmap write permissions.
307 */
308typedef enum VMXMSREXITWRITE
309{
310 /** Writing to this MSR causes a VM-exit. */
311 VMXMSREXIT_INTERCEPT_WRITE = 0xd,
312 /** Writing to this MSR does not cause a VM-exit. */
313 VMXMSREXIT_PASSTHRU_WRITE
314} VMXMSREXITWRITE;
315/** Pointer to MSR-bitmap write permissions. */
316typedef VMXMSREXITWRITE* PVMXMSREXITWRITE;
317
318
319/**
320 * VMX VM-exit handler.
321 *
322 * @returns VBox status code.
323 * @param pVCpu Pointer to the VMCPU.
324 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
325 * out-of-sync. Make sure to update the required
326 * fields before using them.
327 * @param pVmxTransient Pointer to the VMX-transient structure.
328 */
329#ifndef HMVMX_USE_FUNCTION_TABLE
330typedef int FNVMXEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
331#else
332typedef DECLCALLBACK(int) FNVMXEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
333/** Pointer to VM-exit handler. */
334typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
335#endif
336
337
338/*******************************************************************************
339* Internal Functions *
340*******************************************************************************/
341static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXFLUSHEPT enmFlush);
342static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr);
343static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
344 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntState);
345#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
346static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
347#endif
348#ifndef HMVMX_USE_FUNCTION_TABLE
349DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
350# define HMVMX_EXIT_DECL static int
351#else
352# define HMVMX_EXIT_DECL static DECLCALLBACK(int)
353#endif
354
355/** @name VM-exit handlers.
356 * @{
357 */
358static FNVMXEXITHANDLER hmR0VmxExitXcptOrNmi;
359static FNVMXEXITHANDLER hmR0VmxExitExtInt;
360static FNVMXEXITHANDLER hmR0VmxExitTripleFault;
361static FNVMXEXITHANDLER hmR0VmxExitInitSignal;
362static FNVMXEXITHANDLER hmR0VmxExitSipi;
363static FNVMXEXITHANDLER hmR0VmxExitIoSmi;
364static FNVMXEXITHANDLER hmR0VmxExitSmi;
365static FNVMXEXITHANDLER hmR0VmxExitIntWindow;
366static FNVMXEXITHANDLER hmR0VmxExitNmiWindow;
367static FNVMXEXITHANDLER hmR0VmxExitTaskSwitch;
368static FNVMXEXITHANDLER hmR0VmxExitCpuid;
369static FNVMXEXITHANDLER hmR0VmxExitGetsec;
370static FNVMXEXITHANDLER hmR0VmxExitHlt;
371static FNVMXEXITHANDLER hmR0VmxExitInvd;
372static FNVMXEXITHANDLER hmR0VmxExitInvlpg;
373static FNVMXEXITHANDLER hmR0VmxExitRdpmc;
374static FNVMXEXITHANDLER hmR0VmxExitVmcall;
375static FNVMXEXITHANDLER hmR0VmxExitRdtsc;
376static FNVMXEXITHANDLER hmR0VmxExitRsm;
377static FNVMXEXITHANDLER hmR0VmxExitSetPendingXcptUD;
378static FNVMXEXITHANDLER hmR0VmxExitMovCRx;
379static FNVMXEXITHANDLER hmR0VmxExitMovDRx;
380static FNVMXEXITHANDLER hmR0VmxExitIoInstr;
381static FNVMXEXITHANDLER hmR0VmxExitRdmsr;
382static FNVMXEXITHANDLER hmR0VmxExitWrmsr;
383static FNVMXEXITHANDLER hmR0VmxExitErrInvalidGuestState;
384static FNVMXEXITHANDLER hmR0VmxExitErrMsrLoad;
385static FNVMXEXITHANDLER hmR0VmxExitErrUndefined;
386static FNVMXEXITHANDLER hmR0VmxExitMwait;
387static FNVMXEXITHANDLER hmR0VmxExitMtf;
388static FNVMXEXITHANDLER hmR0VmxExitMonitor;
389static FNVMXEXITHANDLER hmR0VmxExitPause;
390static FNVMXEXITHANDLER hmR0VmxExitErrMachineCheck;
391static FNVMXEXITHANDLER hmR0VmxExitTprBelowThreshold;
392static FNVMXEXITHANDLER hmR0VmxExitApicAccess;
393static FNVMXEXITHANDLER hmR0VmxExitXdtrAccess;
395static FNVMXEXITHANDLER hmR0VmxExitEptViolation;
396static FNVMXEXITHANDLER hmR0VmxExitEptMisconfig;
397static FNVMXEXITHANDLER hmR0VmxExitRdtscp;
398static FNVMXEXITHANDLER hmR0VmxExitPreemptTimer;
399static FNVMXEXITHANDLER hmR0VmxExitWbinvd;
400static FNVMXEXITHANDLER hmR0VmxExitXsetbv;
401static FNVMXEXITHANDLER hmR0VmxExitRdrand;
402static FNVMXEXITHANDLER hmR0VmxExitInvpcid;
403/** @} */
404
405static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
406static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
407static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
408static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
409static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
410static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
411#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
412static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
413#endif
414static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
415
416/*******************************************************************************
417* Global Variables *
418*******************************************************************************/
419#ifdef HMVMX_USE_FUNCTION_TABLE
420
421/**
422 * VMX_EXIT dispatch table.
423 */
424static const PFNVMXEXITHANDLER g_apfnVMExitHandlers[VMX_EXIT_MAX + 1] =
425{
426 /* 00 VMX_EXIT_XCPT_OR_NMI */ hmR0VmxExitXcptOrNmi,
427 /* 01 VMX_EXIT_EXT_INT */ hmR0VmxExitExtInt,
428 /* 02 VMX_EXIT_TRIPLE_FAULT */ hmR0VmxExitTripleFault,
429 /* 03 VMX_EXIT_INIT_SIGNAL */ hmR0VmxExitInitSignal,
430 /* 04 VMX_EXIT_SIPI */ hmR0VmxExitSipi,
431 /* 05 VMX_EXIT_IO_SMI */ hmR0VmxExitIoSmi,
432 /* 06 VMX_EXIT_SMI */ hmR0VmxExitSmi,
433 /* 07 VMX_EXIT_INT_WINDOW */ hmR0VmxExitIntWindow,
434 /* 08 VMX_EXIT_NMI_WINDOW */ hmR0VmxExitNmiWindow,
435 /* 09 VMX_EXIT_TASK_SWITCH */ hmR0VmxExitTaskSwitch,
436 /* 10 VMX_EXIT_CPUID */ hmR0VmxExitCpuid,
437 /* 11 VMX_EXIT_GETSEC */ hmR0VmxExitGetsec,
438 /* 12 VMX_EXIT_HLT */ hmR0VmxExitHlt,
439 /* 13 VMX_EXIT_INVD */ hmR0VmxExitInvd,
440 /* 14 VMX_EXIT_INVLPG */ hmR0VmxExitInvlpg,
441 /* 15 VMX_EXIT_RDPMC */ hmR0VmxExitRdpmc,
442 /* 16 VMX_EXIT_RDTSC */ hmR0VmxExitRdtsc,
443 /* 17 VMX_EXIT_RSM */ hmR0VmxExitRsm,
444 /* 18 VMX_EXIT_VMCALL */ hmR0VmxExitVmcall,
445 /* 19 VMX_EXIT_VMCLEAR */ hmR0VmxExitSetPendingXcptUD,
446 /* 20 VMX_EXIT_VMLAUNCH */ hmR0VmxExitSetPendingXcptUD,
447 /* 21 VMX_EXIT_VMPTRLD */ hmR0VmxExitSetPendingXcptUD,
448 /* 22 VMX_EXIT_VMPTRST */ hmR0VmxExitSetPendingXcptUD,
449 /* 23 VMX_EXIT_VMREAD */ hmR0VmxExitSetPendingXcptUD,
450 /* 24 VMX_EXIT_VMRESUME */ hmR0VmxExitSetPendingXcptUD,
451 /* 25 VMX_EXIT_VMWRITE */ hmR0VmxExitSetPendingXcptUD,
452 /* 26 VMX_EXIT_VMXOFF */ hmR0VmxExitSetPendingXcptUD,
453 /* 27 VMX_EXIT_VMXON */ hmR0VmxExitSetPendingXcptUD,
454 /* 28 VMX_EXIT_MOV_CRX */ hmR0VmxExitMovCRx,
455 /* 29 VMX_EXIT_MOV_DRX */ hmR0VmxExitMovDRx,
456 /* 30 VMX_EXIT_IO_INSTR */ hmR0VmxExitIoInstr,
457 /* 31 VMX_EXIT_RDMSR */ hmR0VmxExitRdmsr,
458 /* 32 VMX_EXIT_WRMSR */ hmR0VmxExitWrmsr,
459 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ hmR0VmxExitErrInvalidGuestState,
460 /* 34 VMX_EXIT_ERR_MSR_LOAD */ hmR0VmxExitErrMsrLoad,
461 /* 35 UNDEFINED */ hmR0VmxExitErrUndefined,
462 /* 36 VMX_EXIT_MWAIT */ hmR0VmxExitMwait,
463 /* 37 VMX_EXIT_MTF */ hmR0VmxExitMtf,
464 /* 38 UNDEFINED */ hmR0VmxExitErrUndefined,
465 /* 39 VMX_EXIT_MONITOR */ hmR0VmxExitMonitor,
 466 /* 40 VMX_EXIT_PAUSE */ hmR0VmxExitPause,
 467 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ hmR0VmxExitErrMachineCheck,
 468 /* 42 UNDEFINED */ hmR0VmxExitErrUndefined,
469 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ hmR0VmxExitTprBelowThreshold,
470 /* 44 VMX_EXIT_APIC_ACCESS */ hmR0VmxExitApicAccess,
471 /* 45 UNDEFINED */ hmR0VmxExitErrUndefined,
472 /* 46 VMX_EXIT_XDTR_ACCESS */ hmR0VmxExitXdtrAccess,
473 /* 47 VMX_EXIT_TR_ACCESS */ hmR0VmxExitXdtrAccess,
474 /* 48 VMX_EXIT_EPT_VIOLATION */ hmR0VmxExitEptViolation,
475 /* 49 VMX_EXIT_EPT_MISCONFIG */ hmR0VmxExitEptMisconfig,
476 /* 50 VMX_EXIT_INVEPT */ hmR0VmxExitSetPendingXcptUD,
477 /* 51 VMX_EXIT_RDTSCP */ hmR0VmxExitRdtscp,
478 /* 52 VMX_EXIT_PREEMPT_TIMER */ hmR0VmxExitPreemptTimer,
479 /* 53 VMX_EXIT_INVVPID */ hmR0VmxExitSetPendingXcptUD,
480 /* 54 VMX_EXIT_WBINVD */ hmR0VmxExitWbinvd,
481 /* 55 VMX_EXIT_XSETBV */ hmR0VmxExitXsetbv,
482 /* 56 UNDEFINED */ hmR0VmxExitErrUndefined,
483 /* 57 VMX_EXIT_RDRAND */ hmR0VmxExitRdrand,
484 /* 58 VMX_EXIT_INVPCID */ hmR0VmxExitInvpcid,
485 /* 59 VMX_EXIT_VMFUNC */ hmR0VmxExitSetPendingXcptUD
486};
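/* The table is indexed directly by the basic VM-exit reason, so dispatch amounts to roughly the
   following (illustrative sketch; the real dispatcher appears further down in this file):

       if (RT_LIKELY(pVmxTransient->uExitReason <= VMX_EXIT_MAX))
           rc = g_apfnVMExitHandlers[pVmxTransient->uExitReason](pVCpu, pMixedCtx, pVmxTransient);
*/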
487#endif /* HMVMX_USE_FUNCTION_TABLE */
488
489#ifdef VBOX_STRICT
490static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
491{
492 /* 0 */ "(Not Used)",
493 /* 1 */ "VMCALL executed in VMX root operation.",
494 /* 2 */ "VMCLEAR with invalid physical address.",
495 /* 3 */ "VMCLEAR with VMXON pointer.",
496 /* 4 */ "VMLAUNCH with non-clear VMCS.",
497 /* 5 */ "VMRESUME with non-launched VMCS.",
498 /* 6 */ "VMRESUME after VMXOFF.",
499 /* 7 */ "VM entry with invalid control fields.",
500 /* 8 */ "VM entry with invalid host state fields.",
501 /* 9 */ "VMPTRLD with invalid physical address.",
502 /* 10 */ "VMPTRLD with VMXON pointer.",
503 /* 11 */ "VMPTRLD with incorrect revision identifier.",
504 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
505 /* 13 */ "VMWRITE to read-only VMCS component.",
506 /* 14 */ "(Not Used)",
507 /* 15 */ "VMXON executed in VMX root operation.",
508 /* 16 */ "VM entry with invalid executive-VMCS pointer.",
509 /* 17 */ "VM entry with non-launched executing VMCS.",
510 /* 18 */ "VM entry with executive-VMCS pointer not VMXON pointer.",
511 /* 19 */ "VMCALL with non-clear VMCS.",
512 /* 20 */ "VMCALL with invalid VM-exit control fields.",
513 /* 21 */ "(Not Used)",
514 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
515 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
516 /* 24 */ "VMCALL with invalid SMM-monitor features.",
517 /* 25 */ "VM entry with invalid VM-execution control fields in executive VMCS.",
518 /* 26 */ "VM entry with events blocked by MOV SS.",
519 /* 27 */ "(Not Used)",
520 /* 28 */ "Invalid operand to INVEPT/INVVPID."
521};
522#endif /* VBOX_STRICT */
523
524
525
526/**
527 * Updates the VM's last error record. If there was a VMX instruction error,
528 * reads the error data from the VMCS and updates the VCPU's last error record as
529 * well.
530 *
531 * @param pVM Pointer to the VM.
532 * @param pVCpu Pointer to the VMCPU (can be NULL if @a rc is not
533 * VERR_VMX_UNABLE_TO_START_VM or
534 * VERR_VMX_INVALID_VMCS_FIELD).
535 * @param rc The error code.
536 */
537static void hmR0VmxUpdateErrorRecord(PVM pVM, PVMCPU pVCpu, int rc)
538{
539 AssertPtr(pVM);
540 if ( rc == VERR_VMX_INVALID_VMCS_FIELD
541 || rc == VERR_VMX_UNABLE_TO_START_VM)
542 {
543 AssertPtrReturnVoid(pVCpu);
544 VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
545 }
546 pVM->hm.s.lLastError = rc;
547}
548
549
550/**
551 * Reads the VM-entry interruption-information field from the VMCS into the VMX
552 * transient structure.
553 *
554 * @returns VBox status code.
555 * @param pVmxTransient Pointer to the VMX transient structure.
556 *
557 * @remarks No-long-jump zone!!!
558 */
559DECLINLINE(int) hmR0VmxReadEntryIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
560{
561 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
562 AssertRCReturn(rc, rc);
563 return VINF_SUCCESS;
564}
565
566
567/**
568 * Reads the VM-entry exception error code field from the VMCS into
569 * the VMX transient structure.
570 *
571 * @returns VBox status code.
572 * @param pVmxTransient Pointer to the VMX transient structure.
573 *
574 * @remarks No-long-jump zone!!!
575 */
576DECLINLINE(int) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
577{
578 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
579 AssertRCReturn(rc, rc);
580 return VINF_SUCCESS;
581}
582
583
584/**
585 * Reads the VM-entry instruction length field from the VMCS into the VMX
586 * transient structure.
587 *
588 * @returns VBox status code.
589 * @param pVmxTransient Pointer to the VMX transient structure.
590 *
591 * @remarks No-long-jump zone!!!
592 */
593DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
594{
595 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
596 AssertRCReturn(rc, rc);
597 return VINF_SUCCESS;
598}
599
600
601/**
602 * Reads the VM-exit interruption-information field from the VMCS into the VMX
603 * transient structure.
604 *
605 * @returns VBox status code.
606 * @param pVmxTransient Pointer to the VMX transient structure.
607 */
608DECLINLINE(int) hmR0VmxReadExitIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
609{
610 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO))
611 {
612 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
613 AssertRCReturn(rc, rc);
614 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO;
615 }
616 return VINF_SUCCESS;
617}
618
619
620/**
621 * Reads the VM-exit interruption error code from the VMCS into the VMX
622 * transient structure.
623 *
624 * @returns VBox status code.
625 * @param pVmxTransient Pointer to the VMX transient structure.
626 */
627DECLINLINE(int) hmR0VmxReadExitIntErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
628{
629 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE))
630 {
631 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
632 AssertRCReturn(rc, rc);
633 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE;
634 }
635 return VINF_SUCCESS;
636}
637
638
639/**
640 * Reads the VM-exit instruction length field from the VMCS into the VMX
641 * transient structure.
642 *
643 * @returns VBox status code.
645 * @param pVmxTransient Pointer to the VMX transient structure.
646 */
647DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
648{
649 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN))
650 {
651 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
652 AssertRCReturn(rc, rc);
653 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN;
654 }
655 return VINF_SUCCESS;
656}
657
658
659/**
660 * Reads the VM-exit instruction-information field from the VMCS into
661 * the VMX transient structure.
662 *
663 * @returns VBox status code.
664 * @param pVmxTransient Pointer to the VMX transient structure.
665 */
666DECLINLINE(int) hmR0VmxReadExitInstrInfoVmcs(PVMXTRANSIENT pVmxTransient)
667{
668 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO))
669 {
670 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
671 AssertRCReturn(rc, rc);
672 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO;
673 }
674 return VINF_SUCCESS;
675}
676
677
678/**
679 * Reads the exit qualification from the VMCS into the VMX transient structure.
680 *
681 * @returns VBox status code.
682 * @param pVCpu Pointer to the VMCPU (required for the VMCS cache
683 * case).
684 * @param pVmxTransient Pointer to the VMX transient structure.
685 */
686DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
687{
688 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION))
689 {
690 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification); NOREF(pVCpu);
691 AssertRCReturn(rc, rc);
692 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION;
693 }
694 return VINF_SUCCESS;
695}
696
697
698/**
699 * Reads the IDT-vectoring information field from the VMCS into the VMX
700 * transient structure.
701 *
702 * @returns VBox status code.
703 * @param pVmxTransient Pointer to the VMX transient structure.
704 *
705 * @remarks No-long-jump zone!!!
706 */
707DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
708{
709 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO))
710 {
711 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_INFO, &pVmxTransient->uIdtVectoringInfo);
712 AssertRCReturn(rc, rc);
713 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO;
714 }
715 return VINF_SUCCESS;
716}
717
718
719/**
720 * Reads the IDT-vectoring error code from the VMCS into the VMX
721 * transient structure.
722 *
723 * @returns VBox status code.
724 * @param pVmxTransient Pointer to the VMX transient structure.
725 */
726DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
727{
728 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE))
729 {
730 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
731 AssertRCReturn(rc, rc);
732 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE;
733 }
734 return VINF_SUCCESS;
735}
736
737
738/**
739 * Enters VMX root mode operation on the current CPU.
740 *
741 * @returns VBox status code.
742 * @param pVM Pointer to the VM (optional, can be NULL, after
743 * a resume).
744 * @param HCPhysCpuPage Physical address of the VMXON region.
745 * @param pvCpuPage Pointer to the VMXON region.
746 */
747static int hmR0VmxEnterRootMode(PVM pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
748{
749 Assert(HCPhysCpuPage && HCPhysCpuPage != NIL_RTHCPHYS);
750 Assert(RT_ALIGN_T(HCPhysCpuPage, _4K, RTHCPHYS) == HCPhysCpuPage);
751 Assert(pvCpuPage);
752 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
753
754 if (pVM)
755 {
756 /* Write the VMCS revision dword to the VMXON region. */
757 *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.Msrs.u64BasicInfo);
758 }
759
760 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
761 RTCCUINTREG uEflags = ASMIntDisableFlags();
762
763 /* Enable the VMX bit in CR4 if necessary. */
764 RTCCUINTREG uCr4 = ASMGetCR4();
765 if (!(uCr4 & X86_CR4_VMXE))
766 ASMSetCR4(uCr4 | X86_CR4_VMXE);
767
768 /* Enter VMX root mode. */
769 int rc = VMXEnable(HCPhysCpuPage);
770 if (RT_FAILURE(rc))
771 ASMSetCR4(uCr4);
772
773 /* Restore interrupts. */
774 ASMSetFlags(uEflags);
775 return rc;
776}
777
778
779/**
780 * Exits VMX root mode operation on the current CPU.
781 *
782 * @returns VBox status code.
783 */
784static int hmR0VmxLeaveRootMode(void)
785{
786 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
787
788 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
789 RTCCUINTREG uEflags = ASMIntDisableFlags();
790
791 /* If we're for some reason not in VMX root mode, then don't leave it. */
792 RTCCUINTREG uHostCR4 = ASMGetCR4();
793
794 int rc;
795 if (uHostCR4 & X86_CR4_VMXE)
796 {
797 /* Exit VMX root mode and clear the VMX bit in CR4. */
798 VMXDisable();
799 ASMSetCR4(uHostCR4 & ~X86_CR4_VMXE);
800 rc = VINF_SUCCESS;
801 }
802 else
803 rc = VERR_VMX_NOT_IN_VMX_ROOT_MODE;
804
805 /* Restore interrupts. */
806 ASMSetFlags(uEflags);
807 return rc;
808}
809
810
811/**
812 * Allocates and maps one physically contiguous page. The allocated page is
813 * zero'd out. (Used by various VT-x structures).
814 *
815 * @returns IPRT status code.
816 * @param pMemObj Pointer to the ring-0 memory object.
817 * @param ppVirt Where to store the virtual address of the
818 * allocation.
819 * @param pHCPhys Where to store the physical address of the
820 * allocation.
821 */
822DECLINLINE(int) hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
823{
824 AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER);
825 AssertPtrReturn(ppVirt, VERR_INVALID_PARAMETER);
826 AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
827
828 int rc = RTR0MemObjAllocCont(pMemObj, PAGE_SIZE, false /* fExecutable */);
829 if (RT_FAILURE(rc))
830 return rc;
831 *ppVirt = RTR0MemObjAddress(*pMemObj);
832 *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
833 ASMMemZero32(*ppVirt, PAGE_SIZE);
834 return VINF_SUCCESS;
835}
836
837
838/**
839 * Frees and unmaps an allocated physical page.
840 *
841 * @param pMemObj Pointer to the ring-0 memory object.
842 * @param ppVirt Where to reset the virtual address of the
843 * allocation (to 0).
844 * @param pHCPhys Where to reset the physical address of the
845 * allocation (to 0).
846 */
847DECLINLINE(void) hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
848{
849 AssertPtr(pMemObj);
850 AssertPtr(ppVirt);
851 AssertPtr(pHCPhys);
852 if (*pMemObj != NIL_RTR0MEMOBJ)
853 {
854 int rc = RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
855 AssertRC(rc);
856 *pMemObj = NIL_RTR0MEMOBJ;
857 *ppVirt = 0;
858 *pHCPhys = 0;
859 }
860}
861
862
863/**
864 * Worker function to free VT-x related structures.
865 *
866 * @returns IPRT status code.
867 * @param pVM Pointer to the VM.
868 */
869static void hmR0VmxStructsFree(PVM pVM)
870{
871 for (VMCPUID i = 0; i < pVM->cCpus; i++)
872 {
873 PVMCPU pVCpu = &pVM->aCpus[i];
874 AssertPtr(pVCpu);
875
876 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
877 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
878
879 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
880 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
881
882 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic, &pVCpu->hm.s.vmx.HCPhysVirtApic);
883 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
884 }
885
886 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
887#ifdef VBOX_WITH_CRASHDUMP_MAGIC
888 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
889#endif
890}
891
892
893/**
894 * Worker function to allocate VT-x related VM structures.
895 *
896 * @returns IPRT status code.
897 * @param pVM Pointer to the VM.
898 */
899static int hmR0VmxStructsAlloc(PVM pVM)
900{
901 /*
902 * Initialize members up-front so we can cleanup properly on allocation failure.
903 */
904#define VMXLOCAL_INIT_VM_MEMOBJ(a_Name, a_VirtPrefix) \
905 pVM->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
906 pVM->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
907 pVM->hm.s.vmx.HCPhys##a_Name = 0;
908
909#define VMXLOCAL_INIT_VMCPU_MEMOBJ(a_Name, a_VirtPrefix) \
910 pVCpu->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
911 pVCpu->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
912 pVCpu->hm.s.vmx.HCPhys##a_Name = 0;
913
914#ifdef VBOX_WITH_CRASHDUMP_MAGIC
915 VMXLOCAL_INIT_VM_MEMOBJ(Scratch, pv);
916#endif
917 VMXLOCAL_INIT_VM_MEMOBJ(ApicAccess, pb);
918
919 AssertCompile(sizeof(VMCPUID) == sizeof(pVM->cCpus));
920 for (VMCPUID i = 0; i < pVM->cCpus; i++)
921 {
922 PVMCPU pVCpu = &pVM->aCpus[i];
923 VMXLOCAL_INIT_VMCPU_MEMOBJ(Vmcs, pv);
924 VMXLOCAL_INIT_VMCPU_MEMOBJ(VirtApic, pb);
925 VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
926 VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
927 VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
928 }
929#undef VMXLOCAL_INIT_VMCPU_MEMOBJ
930#undef VMXLOCAL_INIT_VM_MEMOBJ
931
932 /* The VMCS size cannot be more than 4096 bytes. See Intel spec. Appendix A.1 "Basic VMX Information". */
933 AssertReturnStmt(MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.Msrs.u64BasicInfo) <= PAGE_SIZE,
934 (&pVM->aCpus[0])->hm.s.u32HMError = VMX_UFC_INVALID_VMCS_SIZE,
935 VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO);
936
937 /*
938 * Allocate all the VT-x structures.
939 */
940 int rc = VINF_SUCCESS;
941#ifdef VBOX_WITH_CRASHDUMP_MAGIC
942 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
943 if (RT_FAILURE(rc))
944 goto cleanup;
945 strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
946 *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xdeadbeefdeadbeef);
947#endif
948
949 /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
950 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
951 {
952 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
953 &pVM->hm.s.vmx.HCPhysApicAccess);
954 if (RT_FAILURE(rc))
955 goto cleanup;
956 }
957
958 /*
959 * Initialize per-VCPU VT-x structures.
960 */
961 for (VMCPUID i = 0; i < pVM->cCpus; i++)
962 {
963 PVMCPU pVCpu = &pVM->aCpus[i];
964 AssertPtr(pVCpu);
965
966 /* Allocate the VM control structure (VMCS). */
967 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
968 if (RT_FAILURE(rc))
969 goto cleanup;
970
971 /* Allocate the Virtual-APIC page for transparent TPR accesses. */
972 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
973 {
974 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
975 &pVCpu->hm.s.vmx.HCPhysVirtApic);
976 if (RT_FAILURE(rc))
977 goto cleanup;
978 }
979
980 /*
981 * Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for
982 * transparent accesses of specific MSRs.
983 *
984 * If the condition for enabling MSR bitmaps changes here, don't forget to
985 * update HMIsMsrBitmapsAvailable().
986 */
987 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
988 {
989 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
990 &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
991 if (RT_FAILURE(rc))
992 goto cleanup;
993 ASMMemFill32(pVCpu->hm.s.vmx.pvMsrBitmap, PAGE_SIZE, UINT32_C(0xffffffff));
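            /* Filling the bitmap with all ones means every MSR access in this range intercepts
               (causes a VM-exit) by default; hmR0VmxSetMsrPermission() later clears the relevant
               read/write bits for MSRs that are safe to pass straight through to the guest. */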
994 }
995
996 /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
997 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
998 if (RT_FAILURE(rc))
999 goto cleanup;
1000
1001 /* Allocate the VM-exit MSR-load page for the host MSRs. */
1002 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
1003 if (RT_FAILURE(rc))
1004 goto cleanup;
1005 }
1006
1007 return VINF_SUCCESS;
1008
1009cleanup:
1010 hmR0VmxStructsFree(pVM);
1011 return rc;
1012}
1013
1014
1015/**
1016 * Does global VT-x initialization (called during module initialization).
1017 *
1018 * @returns VBox status code.
1019 */
1020VMMR0DECL(int) VMXR0GlobalInit(void)
1021{
1022#ifdef HMVMX_USE_FUNCTION_TABLE
1023 AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers));
1024# ifdef VBOX_STRICT
1025 for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++)
1026 Assert(g_apfnVMExitHandlers[i]);
1027# endif
1028#endif
1029 return VINF_SUCCESS;
1030}
1031
1032
1033/**
1034 * Does global VT-x termination (called during module termination).
1035 */
1036VMMR0DECL(void) VMXR0GlobalTerm()
1037{
1038 /* Nothing to do currently. */
1039}
1040
1041
1042/**
1043 * Sets up and activates VT-x on the current CPU.
1044 *
1045 * @returns VBox status code.
1046 * @param pCpu Pointer to the global CPU info struct.
1047 * @param pVM Pointer to the VM (can be NULL after a host resume
1048 * operation).
1049 * @param pvCpuPage Pointer to the VMXON region (can be NULL if @a
1050 * fEnabledByHost is true).
1051 * @param HCPhysCpuPage Physical address of the VMXON region (can be 0 if
1052 * @a fEnabledByHost is true).
1053 * @param fEnabledByHost Set if SUPR0EnableVTx() or similar was used to
1054 * enable VT-x on the host.
1055 * @param pvMsrs Opaque pointer to VMXMSRS struct.
1056 */
1057VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
1058 void *pvMsrs)
1059{
1060 Assert(pCpu);
1061 Assert(pvMsrs);
1062 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1063
1064 /* Enable VT-x if it's not already enabled by the host. */
1065 if (!fEnabledByHost)
1066 {
1067 int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
1068 if (RT_FAILURE(rc))
1069 return rc;
1070 }
1071
1072 /*
1073 * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor has been using EPTPs) so
1074 * we don't retain any stale guest-physical mappings which won't get invalidated when flushing by VPID.
1075 */
1076 PVMXMSRS pMsrs = (PVMXMSRS)pvMsrs;
1077 if (pMsrs->u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
1078 {
1079 hmR0VmxFlushEpt(NULL /* pVCpu */, VMXFLUSHEPT_ALL_CONTEXTS);
1080 pCpu->fFlushAsidBeforeUse = false;
1081 }
1082 else
1083 pCpu->fFlushAsidBeforeUse = true;
1084
1085 /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
1086 ++pCpu->cTlbFlushes;
1087
1088 return VINF_SUCCESS;
1089}
1090
1091
1092/**
1093 * Deactivates VT-x on the current CPU.
1094 *
1095 * @returns VBox status code.
1096 * @param pCpu Pointer to the global CPU info struct.
1097 * @param pvCpuPage Pointer to the VMXON region.
1098 * @param HCPhysCpuPage Physical address of the VMXON region.
1099 *
1100 * @remarks This function should never be called when SUPR0EnableVTx() or
1101 * similar was used to enable VT-x on the host.
1102 */
1103VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
1104{
1105 NOREF(pCpu);
1106 NOREF(pvCpuPage);
1107 NOREF(HCPhysCpuPage);
1108
1109 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1110 return hmR0VmxLeaveRootMode();
1111}
1112
1113
1114/**
1115 * Sets the permission bits for the specified MSR in the MSR bitmap.
1116 *
1117 * @param pVCpu Pointer to the VMCPU.
1118 * @param uMsr The MSR value.
1119 * @param enmRead Whether reading this MSR causes a VM-exit.
1120 * @param enmWrite Whether writing this MSR causes a VM-exit.
1121 */
1122static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
1123{
1124 int32_t iBit;
1125 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
1126
1127 /*
1128 * Layout:
1129 * 0x000 - 0x3ff - Low MSR read bits
1130 * 0x400 - 0x7ff - High MSR read bits
1131 * 0x800 - 0xbff - Low MSR write bits
1132 * 0xc00 - 0xfff - High MSR write bits
1133 */
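    /* Worked example of the mapping below (values for illustration): MSR_K8_LSTAR (0xC0000082)
       yields iBit = 0x82 with pbMsrBitmap advanced by 0x400 into the high-MSR read range; its
       write permission bit then lives a further 0x800 bytes in, i.e. at offset 0xC00. */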
1134 if (uMsr <= 0x00001FFF)
1135 iBit = uMsr;
1136 else if ( uMsr >= 0xC0000000
1137 && uMsr <= 0xC0001FFF)
1138 {
1139 iBit = (uMsr - 0xC0000000);
1140 pbMsrBitmap += 0x400;
1141 }
1142 else
1143 {
1144 AssertMsgFailed(("hmR0VmxSetMsrPermission: Invalid MSR %#RX32\n", uMsr));
1145 return;
1146 }
1147
1148 Assert(iBit <= 0x1fff);
1149 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
1150 ASMBitSet(pbMsrBitmap, iBit);
1151 else
1152 ASMBitClear(pbMsrBitmap, iBit);
1153
1154 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
1155 ASMBitSet(pbMsrBitmap + 0x800, iBit);
1156 else
1157 ASMBitClear(pbMsrBitmap + 0x800, iBit);
1158}
1159
1160
1161#ifdef VBOX_STRICT
1162/**
1163 * Gets the permission bits for the specified MSR in the MSR bitmap.
1164 *
1165 * @returns VBox status code.
1166 * @retval VINF_SUCCESS if the specified MSR is found.
1167 * @retval VERR_NOT_FOUND if the specified MSR is not found.
1168 * @retval VERR_NOT_SUPPORTED if VT-x doesn't allow the MSR.
1169 *
1170 * @param pVCpu Pointer to the VMCPU.
1171 * @param uMsr The MSR.
1172 * @param penmRead Where to store the read permissions.
1173 * @param penmWrite Where to store the write permissions.
1174 */
1175static int hmR0VmxGetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, PVMXMSREXITREAD penmRead, PVMXMSREXITWRITE penmWrite)
1176{
1177 AssertPtrReturn(penmRead, VERR_INVALID_PARAMETER);
1178 AssertPtrReturn(penmWrite, VERR_INVALID_PARAMETER);
1179 int32_t iBit;
1180 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
1181
1182 /* See hmR0VmxSetMsrPermission() for the layout. */
1183 if (uMsr <= 0x00001FFF)
1184 iBit = uMsr;
1185 else if ( uMsr >= 0xC0000000
1186 && uMsr <= 0xC0001FFF)
1187 {
1188 iBit = (uMsr - 0xC0000000);
1189 pbMsrBitmap += 0x400;
1190 }
1191 else
1192 {
1193 AssertMsgFailed(("hmR0VmxGetMsrPermission: Invalid MSR %#RX32\n", uMsr));
1194 return VERR_NOT_SUPPORTED;
1195 }
1196
1197 Assert(iBit <= 0x1fff);
1198 if (ASMBitTest(pbMsrBitmap, iBit))
1199 *penmRead = VMXMSREXIT_INTERCEPT_READ;
1200 else
1201 *penmRead = VMXMSREXIT_PASSTHRU_READ;
1202
1203 if (ASMBitTest(pbMsrBitmap + 0x800, iBit))
1204 *penmWrite = VMXMSREXIT_INTERCEPT_WRITE;
1205 else
1206 *penmWrite = VMXMSREXIT_PASSTHRU_WRITE;
1207 return VINF_SUCCESS;
1208}
1209#endif /* VBOX_STRICT */
1210
1211
1212/**
1213 * Updates the VMCS with the number of effective MSRs in the auto-load/store MSR
1214 * area.
1215 *
1216 * @returns VBox status code.
1217 * @param pVCpu Pointer to the VMCPU.
1218 * @param cMsrs The number of MSRs.
1219 */
1220DECLINLINE(int) hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, uint32_t cMsrs)
1221{
1222 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
1223 uint32_t const cMaxSupportedMsrs = MSR_IA32_VMX_MISC_MAX_MSR(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc);
1224 if (RT_UNLIKELY(cMsrs > cMaxSupportedMsrs))
1225 {
1226 LogRel(("CPU auto-load/store MSR count in VMCS exceeded cMsrs=%u Supported=%u.\n", cMsrs, cMaxSupportedMsrs));
1227 pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
1228 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1229 }
1230
1231 /* Update number of guest MSRs to load/store across the world-switch. */
1232 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs); AssertRCReturn(rc, rc);
1233 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs); AssertRCReturn(rc, rc);
1234
1235 /* Update number of host MSRs to load after the world-switch. Identical to guest-MSR count as it's always paired. */
1236 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cMsrs); AssertRCReturn(rc, rc);
1237
1238 /* Update the VCPU's copy of the MSR count. */
1239 pVCpu->hm.s.vmx.cMsrs = cMsrs;
1240
1241 return VINF_SUCCESS;
1242}
1243
1244
1245/**
1246 * Adds a new (or updates the value of an existing) guest/host MSR
1247 * pair to be swapped during the world-switch as part of the
1248 * auto-load/store MSR area in the VMCS.
1249 *
1250 * @returns true if the MSR was added -and- its value was updated, false
1251 * otherwise.
1252 * @param pVCpu Pointer to the VMCPU.
1253 * @param uMsr The MSR.
1254 * @param uGuestMsrValue Value of the guest MSR.
1255 * @param fUpdateHostMsr Whether to update the value of the host MSR if
1256 * necessary.
1257 */
1258static bool hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr, uint64_t uGuestMsrValue, bool fUpdateHostMsr)
1259{
1260 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1261 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1262 uint32_t i;
1263 for (i = 0; i < cMsrs; i++)
1264 {
1265 if (pGuestMsr->u32Msr == uMsr)
1266 break;
1267 pGuestMsr++;
1268 }
1269
1270 bool fAdded = false;
1271 if (i == cMsrs)
1272 {
1273 ++cMsrs;
1274 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
1275 AssertRC(rc);
1276
1277 /* Now that we're swapping MSRs during the world-switch, allow the guest to read/write them without causing VM-exits. */
1278 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1279 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1280
1281 fAdded = true;
1282 }
1283
1284 /* Update the MSR values in the auto-load/store MSR area. */
1285 pGuestMsr->u32Msr = uMsr;
1286 pGuestMsr->u64Value = uGuestMsrValue;
1287
1288 /* Create/update the MSR slot in the host MSR area. */
1289 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1290 pHostMsr += i;
1291 pHostMsr->u32Msr = uMsr;
1292
1293 /*
1294 * Update the host MSR only when requested by the caller AND when we're
1295 * adding it to the auto-load/store area. Otherwise, it would have been
1296 * updated by hmR0VmxSaveHostMsrs(). We do this for performance reasons.
1297 */
1298 bool fUpdatedMsrValue = false;
1299 if ( fAdded
1300 && fUpdateHostMsr)
1301 {
1302 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1303 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1304 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
1305 fUpdatedMsrValue = true;
1306 }
1307
1308 return fUpdatedMsrValue;
1309}
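/* Typical usage sketch (the MSR and value variable here are illustrative): a caller wanting the
   guest's TSC_AUX swapped on every world-switch would do something along the lines of

       hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, uGuestTscAux, false);

   and undo it later with hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX). */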
1310
1311
1312/**
1313 * Removes a guest/host MSR pair to be swapped during the world-switch from the
1314 * auto-load/store MSR area in the VMCS.
1315 *
1316 * @returns VBox status code.
1317 * @param pVCpu Pointer to the VMCPU.
1318 * @param uMsr The MSR.
1319 */
1320static int hmR0VmxRemoveAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr)
1321{
1322 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1323 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1324 for (uint32_t i = 0; i < cMsrs; i++)
1325 {
1326 /* Find the MSR. */
1327 if (pGuestMsr->u32Msr == uMsr)
1328 {
1329 /* If it's the last MSR, simply reduce the count. */
1330 if (i == cMsrs - 1)
1331 {
1332 --cMsrs;
1333 break;
1334 }
1335
1336 /* Remove it by swapping the last MSR in place of it, and reducing the count. */
1337 PVMXAUTOMSR pLastGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1338 pLastGuestMsr += cMsrs - 1;
1339 pGuestMsr->u32Msr = pLastGuestMsr->u32Msr;
1340 pGuestMsr->u64Value = pLastGuestMsr->u64Value;
1341
1342 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1343 PVMXAUTOMSR pLastHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1344 pLastHostMsr += cMsrs - 1;
1345 pHostMsr->u32Msr = pLastHostMsr->u32Msr;
1346 pHostMsr->u64Value = pLastHostMsr->u64Value;
1347 --cMsrs;
1348 break;
1349 }
1350 pGuestMsr++;
1351 }
1352
1353 /* Update the VMCS if the count changed (meaning the MSR was found). */
1354 if (cMsrs != pVCpu->hm.s.vmx.cMsrs)
1355 {
1356 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
1357 AssertRCReturn(rc, rc);
1358
1359 /* We're no longer swapping MSRs during the world-switch, intercept guest read/writes to them. */
1360 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1361 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
1362
1363 Log4(("Removed MSR %#RX32 new cMsrs=%u\n", uMsr, pVCpu->hm.s.vmx.cMsrs));
1364 return VINF_SUCCESS;
1365 }
1366
1367 return VERR_NOT_FOUND;
1368}
1369
1370
1371/**
1372 * Checks if the specified guest MSR is part of the auto-load/store area in
1373 * the VMCS.
1374 *
1375 * @returns true if found, false otherwise.
1376 * @param pVCpu Pointer to the VMCPU.
1377 * @param uMsr The MSR to find.
1378 */
1379static bool hmR0VmxIsAutoLoadStoreGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
1380{
1381 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1382 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1383
1384 for (uint32_t i = 0; i < cMsrs; i++, pGuestMsr++)
1385 {
1386 if (pGuestMsr->u32Msr == uMsr)
1387 return true;
1388 }
1389 return false;
1390}
1391
1392
1393/**
1394 * Updates the value of all host MSRs in the auto-load/store area in the VMCS.
1395 *
1396 * @param pVCpu Pointer to the VMCPU.
1397 *
1398 * @remarks No-long-jump zone!!!
1399 */
1400static void hmR0VmxUpdateAutoLoadStoreHostMsrs(PVMCPU pVCpu)
1401{
1402 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1403 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1404 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1405 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1406
1407 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
1408 {
1409 AssertReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr);
1410
1411 /*
1412 * Performance hack for the host EFER MSR. We use the cached value rather than re-read it.
1413 * Strict builds will catch mismatches in hmR0VmxCheckAutoLoadStoreMsrs(). See @bugref{7368}.
1414 */
1415 if (pHostMsr->u32Msr == MSR_K6_EFER)
1416 pHostMsr->u64Value = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostEfer;
1417 else
1418 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
1419 }
1420
1421 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
1422}
1423
1424
1425#if HC_ARCH_BITS == 64
1426/**
1427 * Saves a set of host MSRs to allow read/write passthru access to the guest and
1428 * perform lazy restoration of the host MSRs while leaving VT-x.
1429 *
1430 * @param pVCpu Pointer to the VMCPU.
1431 *
1432 * @remarks No-long-jump zone!!!
1433 */
1434static void hmR0VmxLazySaveHostMsrs(PVMCPU pVCpu)
1435{
1436 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1437
1438 /*
1439 * Note: If you're adding MSRs here, make sure to update the MSR-bitmap permissions in hmR0VmxSetupProcCtls().
1440 */
1441 if (!(pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_LOADED_GUEST))
1442 {
1443 pVCpu->hm.s.vmx.u64HostLStarMsr = ASMRdMsr(MSR_K8_LSTAR);
1444 pVCpu->hm.s.vmx.u64HostStarMsr = ASMRdMsr(MSR_K6_STAR);
1445 pVCpu->hm.s.vmx.u64HostSFMaskMsr = ASMRdMsr(MSR_K8_SF_MASK);
1446 pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
1447 pVCpu->hm.s.vmx.fRestoreHostMsrs |= VMX_RESTORE_HOST_MSR_SAVED_HOST;
1448 }
1449}
1450
1451
1452/**
1453 * Checks whether the MSR belongs to the set of guest MSRs that we restore
1454 * lazily while leaving VT-x.
1455 *
1456 * @returns true if it does, false otherwise.
1457 * @param pVCpu Pointer to the VMCPU.
1458 * @param uMsr The MSR to check.
1459 */
1460static bool hmR0VmxIsLazyGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
1461{
1462 NOREF(pVCpu);
1463 switch (uMsr)
1464 {
1465 case MSR_K8_LSTAR:
1466 case MSR_K6_STAR:
1467 case MSR_K8_SF_MASK:
1468 case MSR_K8_KERNEL_GS_BASE:
1469 return true;
1470 }
1471 return false;
1472}
1473
1474
1475/**
1476 * Saves a set of guest MSRs back into the guest-CPU context.
1477 *
1478 * @param pVCpu Pointer to the VMCPU.
1479 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
1480 * out-of-sync. Make sure to update the required fields
1481 * before using them.
1482 *
1483 * @remarks No-long-jump zone!!!
1484 */
1485static void hmR0VmxLazySaveGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
1486{
1487 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1488 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1489
1490 if (pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_LOADED_GUEST)
1491 {
1492 Assert(pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_SAVED_HOST);
1493 pMixedCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
1494 pMixedCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
1495 pMixedCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
1496 pMixedCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
1497 }
1498}
1499
1500
1501/**
1502 * Loads a set of guest MSRs to allow read/write passthru access to the guest.
1503 *
1504 * The name of this function is slightly confusing. This function does NOT
1505 * postpone loading, but loads the MSR right now. "hmR0VmxLazy" is simply a
1506 * common prefix for functions dealing with "lazy restoration" of the shared
1507 * MSRs.
1508 *
1509 * @param pVCpu Pointer to the VMCPU.
1510 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
1511 * out-of-sync. Make sure to update the required fields
1512 * before using them.
1513 *
1514 * @remarks No-long-jump zone!!!
1515 */
1516static void hmR0VmxLazyLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
1517{
1518 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1519 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1520
1521 Assert(pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_SAVED_HOST);
1522 if (!(pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_LOADED_GUEST))
1523 {
1524#define VMXLOCAL_LAZY_LOAD_GUEST_MSR(uMsr, a_GuestMsr, a_HostMsr) \
1525 do { \
1526 if (pMixedCtx->msr##a_GuestMsr != pVCpu->hm.s.vmx.u64Host##a_HostMsr##Msr) \
1527 ASMWrMsr(uMsr, pMixedCtx->msr##a_GuestMsr); \
1528 else \
1529 Assert(ASMRdMsr(uMsr) == pVCpu->hm.s.vmx.u64Host##a_HostMsr##Msr); \
1530 } while (0)
1531
1532 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_LSTAR, LSTAR, LStar);
1533 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K6_STAR, STAR, Star);
1534 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_SF_MASK, SFMASK, SFMask);
1535 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_KERNEL_GS_BASE, KERNELGSBASE, KernelGSBase);
1536#undef VMXLOCAL_LAZY_LOAD_GUEST_MSR
1537 }
1538 else
1539 {
1540 ASMWrMsr(MSR_K8_LSTAR, pMixedCtx->msrLSTAR);
1541 ASMWrMsr(MSR_K6_STAR, pMixedCtx->msrSTAR);
1542 ASMWrMsr(MSR_K8_SF_MASK, pMixedCtx->msrSFMASK);
1543 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE);
1544 }
1545 pVCpu->hm.s.vmx.fRestoreHostMsrs |= VMX_RESTORE_HOST_MSR_LOADED_GUEST;
1546}
1547
1548
1549/**
1550 * Performs lazy restoration of the set of host MSRs if they were previously
1551 * loaded with guest MSR values.
1552 *
1553 * @param pVCpu Pointer to the VMCPU.
1554 *
1555 * @remarks No-long-jump zone!!!
1556 * @remarks The guest MSRs should have been saved back into the guest-CPU
1557 * context by hmR0VmxLazySaveGuestMsrs()!!!
1558 */
1559static void hmR0VmxLazyRestoreHostMsrs(PVMCPU pVCpu)
1560{
1561 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1562 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1563
1564 if (pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_LOADED_GUEST)
1565 {
1566 Assert(pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_SAVED_HOST);
1567 ASMWrMsr(MSR_K8_LSTAR, pVCpu->hm.s.vmx.u64HostLStarMsr);
1568 ASMWrMsr(MSR_K6_STAR, pVCpu->hm.s.vmx.u64HostStarMsr);
1569 ASMWrMsr(MSR_K8_SF_MASK, pVCpu->hm.s.vmx.u64HostSFMaskMsr);
1570 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr);
1571 }
1572 pVCpu->hm.s.vmx.fRestoreHostMsrs &= ~(VMX_RESTORE_HOST_MSR_LOADED_GUEST | VMX_RESTORE_HOST_MSR_SAVED_HOST);
1573}
1574#endif /* HC_ARCH_BITS == 64 */
1575
1576
1577/**
1578 * Verifies that our cached values of the VMCS controls are all
1579 * consistent with what's actually present in the VMCS.
1580 *
1581 * @returns VBox status code.
1582 * @param pVCpu Pointer to the VMCPU.
1583 */
1584static int hmR0VmxCheckVmcsCtls(PVMCPU pVCpu)
1585{
1586 uint32_t u32Val;
1587 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
1588 AssertRCReturn(rc, rc);
1589 AssertMsgReturn(pVCpu->hm.s.vmx.u32EntryCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32EntryCtls, u32Val),
1590 VERR_VMX_ENTRY_CTLS_CACHE_INVALID);
1591
1592 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val);
1593 AssertRCReturn(rc, rc);
1594 AssertMsgReturn(pVCpu->hm.s.vmx.u32ExitCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ExitCtls, u32Val),
1595 VERR_VMX_EXIT_CTLS_CACHE_INVALID);
1596
1597 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1598 AssertRCReturn(rc, rc);
1599 AssertMsgReturn(pVCpu->hm.s.vmx.u32PinCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32PinCtls, u32Val),
1600 VERR_VMX_PIN_EXEC_CTLS_CACHE_INVALID);
1601
1602 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1603 AssertRCReturn(rc, rc);
1604 AssertMsgReturn(pVCpu->hm.s.vmx.u32ProcCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ProcCtls, u32Val),
1605 VERR_VMX_PROC_EXEC_CTLS_CACHE_INVALID);
1606
1607 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1608 AssertRCReturn(rc, rc);
1609 AssertMsgReturn(pVCpu->hm.s.vmx.u32ProcCtls2 == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ProcCtls2, u32Val),
1610 VERR_VMX_PROC_EXEC2_CTLS_CACHE_INVALID);
1611
1612 return VINF_SUCCESS;
1613}
1614
1615
1616#ifdef VBOX_STRICT
1617/**
1618 * Verifies that our cached host EFER value has not changed
1619 * since we cached it.
1620 *
1621 * @param pVCpu Pointer to the VMCPU.
1622 */
1623static void hmR0VmxCheckHostEferMsr(PVMCPU pVCpu)
1624{
1625 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1626
1627 if (pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
1628 {
1629 uint64_t u64Val;
1630 int rc = VMXReadVmcs64(VMX_VMCS64_HOST_FIELD_EFER_FULL, &u64Val);
1631 AssertRC(rc);
1632
1633 uint64_t u64HostEferMsr = ASMRdMsr(MSR_K6_EFER);
1634 AssertMsgReturnVoid(u64HostEferMsr == u64Val, ("u64HostEferMsr=%#RX64 u64Val=%#RX64\n", u64HostEferMsr, u64Val));
1635 }
1636}
1637
1638
1639/**
1640 * Verifies whether the guest/host MSR pairs in the auto-load/store area in the
1641 * VMCS are correct.
1642 *
1643 * @param pVCpu Pointer to the VMCPU.
1644 */
1645static void hmR0VmxCheckAutoLoadStoreMsrs(PVMCPU pVCpu)
1646{
1647 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1648
1649 /* Verify MSR counts in the VMCS are what we think it should be. */
1650 uint32_t cMsrs;
1651 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cMsrs); AssertRC(rc);
1652 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1653
1654 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cMsrs); AssertRC(rc);
1655 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1656
1657 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &cMsrs); AssertRC(rc);
1658 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1659
1660 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1661 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1662 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
1663 {
1664 /* Verify that the MSRs are paired properly and that the host MSR has the correct value. */
1665 AssertMsgReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr, ("HostMsr=%#RX32 GuestMsr=%#RX32 cMsrs=%u\n", pHostMsr->u32Msr,
1666 pGuestMsr->u32Msr, cMsrs));
1667
1668 uint64_t u64Msr = ASMRdMsr(pHostMsr->u32Msr);
1669 AssertMsgReturnVoid(pHostMsr->u64Value == u64Msr, ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n",
1670 pHostMsr->u32Msr, pHostMsr->u64Value, u64Msr, cMsrs));
1671
1672 /* Verify that the permissions are as expected in the MSR bitmap. */
1673 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1674 {
1675 VMXMSREXITREAD enmRead;
1676 VMXMSREXITWRITE enmWrite;
1677 rc = hmR0VmxGetMsrPermission(pVCpu, pGuestMsr->u32Msr, &enmRead, &enmWrite);
1678             AssertMsgReturnVoid(rc == VINF_SUCCESS, ("hmR0VmxGetMsrPermission failed! rc=%Rrc\n", rc));
1679 if (pGuestMsr->u32Msr == MSR_K6_EFER)
1680 {
1681 AssertMsgReturnVoid(enmRead == VMXMSREXIT_INTERCEPT_READ, ("Passthru read for EFER!?\n"));
1682 AssertMsgReturnVoid(enmWrite == VMXMSREXIT_INTERCEPT_WRITE, ("Passthru write for EFER!?\n"));
1683 }
1684 else
1685 {
1686 AssertMsgReturnVoid(enmRead == VMXMSREXIT_PASSTHRU_READ, ("u32Msr=%#RX32 cMsrs=%u No passthru read!\n",
1687 pGuestMsr->u32Msr, cMsrs));
1688 AssertMsgReturnVoid(enmWrite == VMXMSREXIT_PASSTHRU_WRITE, ("u32Msr=%#RX32 cMsrs=%u No passthru write!\n",
1689 pGuestMsr->u32Msr, cMsrs));
1690 }
1691 }
1692 }
1693}
1694#endif /* VBOX_STRICT */
1695
1696
1697/**
1698 * Flushes the TLB using EPT.
1699 *
1700 * @returns VBox status code.
1701 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
1702 * enmFlush).
1703 * @param enmFlush Type of flush.
1704 *
1705 * @remarks Caller is responsible for making sure this function is called only
1706 * when NestedPaging is supported and providing @a enmFlush that is
1707 * supported by the CPU.
1708 * @remarks Can be called with interrupts disabled.
1709 */
1710static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXFLUSHEPT enmFlush)
1711{
1712 uint64_t au64Descriptor[2];
1713 if (enmFlush == VMXFLUSHEPT_ALL_CONTEXTS)
1714 au64Descriptor[0] = 0;
1715 else
1716 {
1717 Assert(pVCpu);
1718 au64Descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP;
1719 }
1720 au64Descriptor[1] = 0; /* MBZ. Intel spec. 33.3 "VMX Instructions" */
1721
1722 int rc = VMXR0InvEPT(enmFlush, &au64Descriptor[0]);
1723 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmFlush, pVCpu ? pVCpu->hm.s.vmx.HCPhysEPTP : 0,
1724 rc));
1725 if ( RT_SUCCESS(rc)
1726 && pVCpu)
1727 {
1728 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
1729 }
1730}
1731
1732
1733/**
1734 * Flushes the TLB using VPID.
1735 *
1736 * @returns VBox status code.
1737 * @param pVM Pointer to the VM.
1738 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
1739 * enmFlush).
1740 * @param enmFlush Type of flush.
1741 * @param GCPtr Virtual address of the page to flush (can be 0 depending
1742 * on @a enmFlush).
1743 *
1744 * @remarks Can be called with interrupts disabled.
1745 */
1746static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr)
1747{
1748 NOREF(pVM);
1749 AssertPtr(pVM);
1750 Assert(pVM->hm.s.vmx.fVpid);
1751
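    /* The INVVPID descriptor: VPID in bits 15:0 of the first quadword (remaining bits MBZ), and the
       linear address (used only for individual-address flushes) in the second quadword. */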
1752 uint64_t au64Descriptor[2];
1753 if (enmFlush == VMXFLUSHVPID_ALL_CONTEXTS)
1754 {
1755 au64Descriptor[0] = 0;
1756 au64Descriptor[1] = 0;
1757 }
1758 else
1759 {
1760 AssertPtr(pVCpu);
1761 AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1762 AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1763 au64Descriptor[0] = pVCpu->hm.s.uCurrentAsid;
1764 au64Descriptor[1] = GCPtr;
1765 }
1766
1767 int rc = VMXR0InvVPID(enmFlush, &au64Descriptor[0]); NOREF(rc);
1768 AssertMsg(rc == VINF_SUCCESS,
1769 ("VMXR0InvVPID %#x %u %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
1770 if ( RT_SUCCESS(rc)
1771 && pVCpu)
1772 {
1773 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
1774 }
1775}
1776
1777
1778/**
1779 * Invalidates a guest page by guest virtual address. Only relevant for
1780 * EPT/VPID, otherwise there is nothing really to invalidate.
1781 *
1782 * @returns VBox status code.
1783 * @param pVM Pointer to the VM.
1784 * @param pVCpu Pointer to the VMCPU.
1785 * @param GCVirt Guest virtual address of the page to invalidate.
1786 */
1787VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
1788{
1789 AssertPtr(pVM);
1790 AssertPtr(pVCpu);
1791 LogFlowFunc(("pVM=%p pVCpu=%p GCVirt=%RGv\n", pVM, pVCpu, GCVirt));
1792
1793 bool fFlushPending = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
1794 if (!fFlushPending)
1795 {
1796 /*
1797 * We must invalidate the guest TLB entry in either case, we cannot ignore it even for the EPT case
1798          * We must invalidate the guest TLB entry in either case; we cannot ignore it even for the EPT case.
1799          * See @bugref{6043} and @bugref{6177}.
1800          *
1801          * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*() as this
1802          * function may be called in a loop with individual addresses.
1803 if (pVM->hm.s.vmx.fVpid)
1804 {
1805 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1806 {
1807 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, GCVirt);
1808 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
1809 }
1810 else
1811 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1812 }
1813 else if (pVM->hm.s.fNestedPaging)
1814 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1815 }
1816
1817 return VINF_SUCCESS;
1818}
1819
1820
1821/**
1822 * Invalidates a guest page by physical address. Only relevant for EPT/VPID,
1823 * otherwise there is nothing really to invalidate.
1824 *
1825 * @returns VBox status code.
1826 * @param pVM Pointer to the VM.
1827 * @param pVCpu Pointer to the VMCPU.
1828 * @param GCPhys Guest physical address of the page to invalidate.
1829 */
1830VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
1831{
1832 NOREF(pVM); NOREF(GCPhys);
1833 LogFlowFunc(("%RGp\n", GCPhys));
1834
1835 /*
1836 * We cannot flush a page by guest-physical address. invvpid takes only a linear address while invept only flushes
1837      * We cannot flush a page by guest-physical address. invvpid takes only a linear address, while invept flushes
1838      * only by EPT and not by individual address. We update the force flag here and flush before the next VM-entry in hmR0VmxFlushTLB*().
1839 */
1840 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1841 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys);
1842 return VINF_SUCCESS;
1843}
1844
1845
1846/**
1847 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
1848 * case where neither EPT nor VPID is supported by the CPU.
1849 *
1850 * @param pVM Pointer to the VM.
1851 * @param pVCpu Pointer to the VMCPU.
1852 * @param pCpu Pointer to the global HM struct.
1853 *
1854 * @remarks Called with interrupts disabled.
1855 */
1856static void hmR0VmxFlushTaggedTlbNone(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
1857{
1858 AssertPtr(pVCpu);
1859 AssertPtr(pCpu);
1860 NOREF(pVM);
1861
1862 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
1863
1864 /** @todo TLB shootdown is currently not used. See hmQueueInvlPage(). */
1865#if 0
1866 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1867 pVCpu->hm.s.TlbShootdown.cPages = 0;
1868#endif
1869
1870 Assert(pCpu->idCpu != NIL_RTCPUID);
1871 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1872 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1873 pVCpu->hm.s.fForceTLBFlush = false;
1874 return;
1875}
1876
1877
1878/**
1879 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
1880 *
1881 * @param pVM Pointer to the VM.
1882 * @param pVCpu Pointer to the VMCPU.
1883 * @param pCpu Pointer to the global HM CPU struct.
1884 * @remarks All references to "ASID" in this function pertain to "VPID" in
1885 *          Intel's nomenclature. This is to avoid confusion in comparisons,
1886 *          since the host-CPU copies are named "ASID".
1887 *
1888 * @remarks Called with interrupts disabled.
1889 */
1890static void hmR0VmxFlushTaggedTlbBoth(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
1891{
1892#ifdef VBOX_WITH_STATISTICS
1893 bool fTlbFlushed = false;
1894# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { fTlbFlushed = true; } while (0)
1895# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { \
1896 if (!fTlbFlushed) \
1897 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch); \
1898 } while (0)
1899#else
1900# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { } while (0)
1901# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { } while (0)
1902#endif
1903
1904 AssertPtr(pVM);
1905 AssertPtr(pCpu);
1906 AssertPtr(pVCpu);
1907 Assert(pCpu->idCpu != NIL_RTCPUID);
1908
1909 AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
1910 ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
1911 "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hm.s.fNestedPaging, pVM->hm.s.vmx.fVpid));
1912
1913 /*
1914 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
1915 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
1916 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
1917 */
1918 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1919 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1920 {
1921 ++pCpu->uCurrentAsid;
1922 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1923 {
1924 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0. */
1925 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
1926 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
1927 }
1928
1929 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1930 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1931 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1932
1933 /*
1934 * Flush by EPT when we get rescheduled to a new host CPU to ensure EPT-only tagged mappings are also
1935 * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}.
1936 */
1937 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1938 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1939 HMVMX_SET_TAGGED_TLB_FLUSHED();
1940 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH); /* Already flushed-by-EPT, skip doing it again below. */
1941 }
1942
1943 /* Check for explicit TLB shootdowns. */
1944 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1945 {
1946 /*
1947 * Changes to the EPT paging structure by VMM requires flushing by EPT as the CPU creates
1948 * guest-physical (only EPT-tagged) mappings while traversing the EPT tables when EPT is in use.
1949 * Flushing by VPID will only flush linear (only VPID-tagged) and combined (EPT+VPID tagged) mappings
1950 * but not guest-physical mappings.
1951 * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information". See @bugref{6568}.
1952 */
1953 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1954 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1955 HMVMX_SET_TAGGED_TLB_FLUSHED();
1956 }
1957
1958 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere. See hmQueueInvlPage()
1959 * where it is commented out. Support individual entry flushing
1960 * someday. */
1961#if 0
1962 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1963 {
1964 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
1965
1966 /*
1967 * Flush individual guest entries using VPID from the TLB or as little as possible with EPT
1968 * as supported by the CPU.
1969 */
1970 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1971 {
1972 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
1973 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
1974 }
1975 else
1976 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1977
1978 HMVMX_SET_TAGGED_TLB_FLUSHED();
1979 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1980 pVCpu->hm.s.TlbShootdown.cPages = 0;
1981 }
1982#endif
1983
1984 pVCpu->hm.s.fForceTLBFlush = false;
1985
1986 HMVMX_UPDATE_FLUSH_SKIPPED_STAT();
1987
1988 Assert(pVCpu->hm.s.idLastCpu == pCpu->idCpu);
1989 Assert(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes);
1990 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
1991 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
1992 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
1993 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pCpu->idCpu,
1994 pCpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
1995 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
1996 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
1997
1998 /* Update VMCS with the VPID. */
1999 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
2000 AssertRC(rc);
2001
2002#undef HMVMX_SET_TAGGED_TLB_FLUSHED
2003}
2004
2005
2006/**
2007 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
2008 *
2009 * @returns VBox status code.
2010 * @param pVM Pointer to the VM.
2011 * @param pVCpu Pointer to the VMCPU.
2012 * @param pCpu Pointer to the global HM CPU struct.
2013 *
2014 * @remarks Called with interrupts disabled.
2015 */
2016static void hmR0VmxFlushTaggedTlbEpt(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2017{
2018 AssertPtr(pVM);
2019 AssertPtr(pVCpu);
2020 AssertPtr(pCpu);
2021 Assert(pCpu->idCpu != NIL_RTCPUID);
2022 AssertMsg(pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with NestedPaging disabled."));
2023 AssertMsg(!pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID enabled."));
2024
2025 /*
2026 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
2027 * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
2028 */
2029 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
2030 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
2031 {
2032 pVCpu->hm.s.fForceTLBFlush = true;
2033 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2034 }
2035
2036 /* Check for explicit TLB shootdown flushes. */
2037 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2038 {
2039 pVCpu->hm.s.fForceTLBFlush = true;
2040 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2041 }
2042
2043 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
2044 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
2045
2046 if (pVCpu->hm.s.fForceTLBFlush)
2047 {
2048 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
2049 pVCpu->hm.s.fForceTLBFlush = false;
2050 }
2051 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere. See hmQueueInvlPage()
2052 * where it is commented out. Support individual entry flushing
2053 * someday. */
2054#if 0
2055 else
2056 {
2057 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
2058 {
2059 /* We cannot flush individual entries without VPID support. Flush using EPT. */
2060 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
2061 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
2062 }
2063 else
2064 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
2065
2066 pVCpu->hm.s.TlbShootdown.cPages = 0;
2067 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
2068 }
2069#endif
2070}
2071
2072
2073/**
2074 * Flushes the tagged-TLB entries for VPID CPUs as necessary.
2075 *
2076 * @returns VBox status code.
2077 * @param pVM Pointer to the VM.
2078 * @param pVCpu Pointer to the VMCPU.
2079 * @param pCpu Pointer to the global HM CPU struct.
2080 *
2081 * @remarks Called with interrupts disabled.
2082 */
2083static void hmR0VmxFlushTaggedTlbVpid(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2084{
2085 AssertPtr(pVM);
2086 AssertPtr(pVCpu);
2087 AssertPtr(pCpu);
2088 Assert(pCpu->idCpu != NIL_RTCPUID);
2089 AssertMsg(pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTlbVpid cannot be invoked with VPID disabled."));
2090 AssertMsg(!pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTlbVpid cannot be invoked with NestedPaging enabled"));
2091
2092 /*
2093 * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
2094 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
2095 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
2096 */
2097 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
2098 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
2099 {
2100 pVCpu->hm.s.fForceTLBFlush = true;
2101 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2102 }
2103
2104 /* Check for explicit TLB shootdown flushes. */
2105 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2106 {
2107 /*
2108 * If we ever support VPID flush combinations other than ALL or SINGLE-context (see hmR0VmxSetupTaggedTlb())
2109 * we would need to explicitly flush in this case (add an fExplicitFlush = true here and change the
2110 * pCpu->fFlushAsidBeforeUse check below to include fExplicitFlush's too) - an obscure corner case.
2111 */
2112 pVCpu->hm.s.fForceTLBFlush = true;
2113 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2114 }
2115
2116 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
2117 if (pVCpu->hm.s.fForceTLBFlush)
2118 {
2119 ++pCpu->uCurrentAsid;
2120 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
2121 {
2122 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0 */
2123 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
2124 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
2125 }
2126
2127 pVCpu->hm.s.fForceTLBFlush = false;
2128 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
2129 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
2130 if (pCpu->fFlushAsidBeforeUse)
2131 {
2132 if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_SINGLE_CONTEXT)
2133 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_SINGLE_CONTEXT, 0 /* GCPtr */);
2134 else if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_ALL_CONTEXTS)
2135 {
2136 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
2137 pCpu->fFlushAsidBeforeUse = false;
2138 }
2139 else
2140 {
2141 /* hmR0VmxSetupTaggedTlb() ensures we never get here. Paranoia. */
2142 AssertMsgFailed(("Unsupported VPID-flush context type.\n"));
2143 }
2144 }
2145 }
2146 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere. See hmQueueInvlPage()
2147 * where it is commented out. Support individual entry flushing
2148 * someday. */
2149#if 0
2150 else
2151 {
2152 AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
2153 ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
2154 pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
2155 pCpu->uCurrentAsid, pCpu->cTlbFlushes));
2156
2157 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
2158 {
2159 /* Flush individual guest entries using VPID or as little as possible with EPT as supported by the CPU. */
2160 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
2161 {
2162 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
2163 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
2164 }
2165 else
2166 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
2167
2168 pVCpu->hm.s.TlbShootdown.cPages = 0;
2169 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
2170 }
2171 else
2172 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
2173 }
2174#endif
2175
2176 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
2177 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
2178 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
2179 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pCpu->idCpu,
2180 pCpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
2181 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2182 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2183
2184 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
2185 AssertRC(rc);
2186}
2187
2188
2189/**
2190 * Flushes the guest TLB entry based on CPU capabilities.
2191 *
2192 * @param pVCpu Pointer to the VMCPU.
2193 * @param pCpu Pointer to the global HM CPU struct.
2194 */
2195DECLINLINE(void) hmR0VmxFlushTaggedTlb(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2196{
2197#ifdef HMVMX_ALWAYS_FLUSH_TLB
2198 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
2199#endif
2200 PVM pVM = pVCpu->CTX_SUFF(pVM);
2201 switch (pVM->hm.s.vmx.uFlushTaggedTlb)
2202 {
2203 case HMVMX_FLUSH_TAGGED_TLB_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVM, pVCpu, pCpu); break;
2204 case HMVMX_FLUSH_TAGGED_TLB_EPT: hmR0VmxFlushTaggedTlbEpt(pVM, pVCpu, pCpu); break;
2205 case HMVMX_FLUSH_TAGGED_TLB_VPID: hmR0VmxFlushTaggedTlbVpid(pVM, pVCpu, pCpu); break;
2206 case HMVMX_FLUSH_TAGGED_TLB_NONE: hmR0VmxFlushTaggedTlbNone(pVM, pVCpu, pCpu); break;
2207 default:
2208 AssertMsgFailed(("Invalid flush-tag function identifier\n"));
2209 break;
2210 }
2211
2212 /* VMCPU_FF_TLB_SHOOTDOWN is unused. */
2213 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN));
2214
2215 /* Don't assert that VMCPU_FF_TLB_FLUSH should no longer be pending. It can be set by other EMTs. */
2216}
2217
2218
2219/**
2220 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
2221 * TLB entries from the host TLB before VM-entry.
2222 *
2223 * @returns VBox status code.
2224 * @param pVM Pointer to the VM.
2225 */
2226static int hmR0VmxSetupTaggedTlb(PVM pVM)
2227{
2228 /*
2229 * Determine optimal flush type for Nested Paging.
2230     * We cannot ignore EPT if no suitable flush type is supported by the CPU, as we've already set up unrestricted
2231     * guest execution (see hmR3InitFinalizeR0()).
2232 */
2233 if (pVM->hm.s.fNestedPaging)
2234 {
2235 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
2236 {
2237 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
2238 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_SINGLE_CONTEXT;
2239 else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
2240 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_ALL_CONTEXTS;
2241 else
2242 {
2243 /* Shouldn't happen. EPT is supported but no suitable flush-types supported. */
2244 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
2245 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2246 }
2247
2248 /* Make sure the write-back cacheable memory type for EPT is supported. */
2249 if (!(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB))
2250 {
2251 LogRel(("hmR0VmxSetupTaggedTlb: Unsupported EPTP memory type %#x.\n", pVM->hm.s.vmx.Msrs.u64EptVpidCaps));
2252 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
2253 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2254 }
2255 }
2256 else
2257 {
2258 /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
2259 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
2260 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2261 }
2262 }
2263
2264 /*
2265 * Determine optimal flush type for VPID.
2266 */
2267 if (pVM->hm.s.vmx.fVpid)
2268 {
2269 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
2270 {
2271 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
2272 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_SINGLE_CONTEXT;
2273 else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
2274 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_ALL_CONTEXTS;
2275 else
2276 {
2277                 /* Neither SINGLE-context nor ALL-contexts flush types for VPID are supported by the CPU. Ignore VPID capability. */
2278 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
2279 LogRel(("hmR0VmxSetupTaggedTlb: Only INDIV_ADDR supported. Ignoring VPID.\n"));
2280 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
2281 LogRel(("hmR0VmxSetupTaggedTlb: Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
2282 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NOT_SUPPORTED;
2283 pVM->hm.s.vmx.fVpid = false;
2284 }
2285 }
2286 else
2287 {
2288 /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
2289             Log4(("hmR0VmxSetupTaggedTlb: VPID supported without INVVPID support. Ignoring VPID.\n"));
2290 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NOT_SUPPORTED;
2291 pVM->hm.s.vmx.fVpid = false;
2292 }
2293 }
2294
2295 /*
2296 * Setup the handler for flushing tagged-TLBs.
2297 */
2298 if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
2299 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT_VPID;
2300 else if (pVM->hm.s.fNestedPaging)
2301 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT;
2302 else if (pVM->hm.s.vmx.fVpid)
2303 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_VPID;
2304 else
2305 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_NONE;
2306 return VINF_SUCCESS;
2307}
2308
2309
2310/**
2311 * Sets up pin-based VM-execution controls in the VMCS.
2312 *
2313 * @returns VBox status code.
2314 * @param pVM Pointer to the VM.
2315 * @param pVCpu Pointer to the VMCPU.
2316 */
2317static int hmR0VmxSetupPinCtls(PVM pVM, PVMCPU pVCpu)
2318{
2319 AssertPtr(pVM);
2320 AssertPtr(pVCpu);
2321
2322 uint32_t val = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0; /* Bits set here must always be set. */
2323 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1; /* Bits cleared here must always be cleared. */
2324
2325 val |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT /* External interrupts cause a VM-exit. */
2326 | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT; /* Non-maskable interrupts (NMIs) cause a VM-exit. */
2327
2328 if (pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
2329 val |= VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI; /* Use virtual NMIs and virtual-NMI blocking features. */
2330
2331 /* Enable the VMX preemption timer. */
2332 if (pVM->hm.s.vmx.fUsePreemptTimer)
2333 {
2334 Assert(pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
2335 val |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER;
2336 }
2337
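    /* The requested controls must be a subset of those the CPU allows: every bit set in 'val'
       must also be set in 'zap' (the allowed-1 settings), otherwise we cannot run the guest. */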
2338 if ((val & zap) != val)
2339 {
2340 LogRel(("hmR0VmxSetupPinCtls: invalid pin-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
2341 pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0, val, zap));
2342 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
2343 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2344 }
2345
2346 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, val);
2347 AssertRCReturn(rc, rc);
2348
2349 /* Update VCPU with the currently set pin-based VM-execution controls. */
2350 pVCpu->hm.s.vmx.u32PinCtls = val;
2351 return rc;
2352}
2353
2354
2355/**
2356 * Sets up processor-based VM-execution controls in the VMCS.
2357 *
2358 * @returns VBox status code.
2359 * @param pVM Pointer to the VM.
2360 * @param pVCpu Pointer to the VMCPU.
2361 */
2362static int hmR0VmxSetupProcCtls(PVM pVM, PVMCPU pVCpu)
2363{
2364 AssertPtr(pVM);
2365 AssertPtr(pVCpu);
2366
2367 int rc = VERR_INTERNAL_ERROR_5;
2368 uint32_t val = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0; /* Bits set here must be set in the VMCS. */
2369 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2370
2371 val |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT /* HLT causes a VM-exit. */
2372 | VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
2373 | VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
2374 | VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
2375 | VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT /* RDPMC causes a VM-exit. */
2376 | VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT /* MONITOR causes a VM-exit. */
2377 | VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
2378
2379     /* We toggle VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT later; check that it is not -always- required to be set or cleared. */
2380 if ( !(pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT)
2381 || (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))
2382 {
2383 LogRel(("hmR0VmxSetupProcCtls: unsupported VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT combo!"));
2384 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
2385 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2386 }
2387
2388 /* Without Nested Paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
2389 if (!pVM->hm.s.fNestedPaging)
2390 {
2391 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); /* Paranoia. */
2392 val |= VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT
2393 | VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
2394 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
2395 }
2396
2397 /* Use TPR shadowing if supported by the CPU. */
2398 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
2399 {
2400 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
2401 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */
2402 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
2403 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
2404 AssertRCReturn(rc, rc);
2405
2406 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
2407 /* CR8 writes cause a VM-exit based on TPR threshold. */
2408 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT));
2409 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT));
2410 }
2411 else
2412 {
2413 /*
2414 * Some 32-bit CPUs do not support CR8 load/store exiting as MOV CR8 is invalid on 32-bit Intel CPUs.
2415 * Set this control only for 64-bit guests.
2416 */
2417 if (pVM->hm.s.fAllow64BitGuests)
2418 {
2419 val |= VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT /* CR8 reads cause a VM-exit. */
2420 | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */
2421 }
2422 }
2423
2424 /* Use MSR-bitmaps if supported by the CPU. */
2425 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
2426 {
2427 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS;
2428
2429 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
2430 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */
2431 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
2432 AssertRCReturn(rc, rc);
2433
2434 /*
2435 * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored
2436 * automatically using dedicated fields in the VMCS.
2437 */
2438 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2439 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2440 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2441 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2442 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2443
2444#if HC_ARCH_BITS == 64
2445 /*
2446 * Set passthru permissions for the following MSRs (mandatory for VT-x) required for 64-bit guests.
2447 */
2448 if (pVM->hm.s.fAllow64BitGuests)
2449 {
2450 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2451 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2452 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2453 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2454 }
2455#endif
2456 }
2457
2458 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
2459 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
2460 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
2461
2462 if ((val & zap) != val)
2463 {
2464 LogRel(("hmR0VmxSetupProcCtls: invalid processor-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
2465 pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0, val, zap));
2466 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
2467 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2468 }
2469
2470 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, val);
2471 AssertRCReturn(rc, rc);
2472
2473 /* Update VCPU with the currently set processor-based VM-execution controls. */
2474 pVCpu->hm.s.vmx.u32ProcCtls = val;
2475
2476 /*
2477 * Secondary processor-based VM-execution controls.
2478 */
2479 if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL))
2480 {
2481 val = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0; /* Bits set here must be set in the VMCS. */
2482 zap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2483
2484 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
2485 val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT; /* WBINVD causes a VM-exit. */
2486
2487 if (pVM->hm.s.fNestedPaging)
2488 val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT; /* Enable EPT. */
2489 else
2490 {
2491 /*
2492 * Without Nested Paging, INVPCID should cause a VM-exit. Enabling this bit causes the CPU to refer to
2493 * VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT when INVPCID is executed by the guest.
2494 * See Intel spec. 25.4 "Changes to instruction behaviour in VMX non-root operation".
2495 */
2496 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
2497 val |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
2498 }
2499
2500 if (pVM->hm.s.vmx.fVpid)
2501 val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID; /* Enable VPID. */
2502
2503 if (pVM->hm.s.vmx.fUnrestrictedGuest)
2504 val |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST; /* Enable Unrestricted Execution. */
2505
2506 /* Enable Virtual-APIC page accesses if supported by the CPU. This is essentially where the TPR shadow resides. */
2507 /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
2508 * done dynamically. */
2509 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
2510 {
2511 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
2512 Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */
2513 val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC; /* Virtualize APIC accesses. */
2514 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
2515 AssertRCReturn(rc, rc);
2516 }
2517
2518 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
2519 val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP; /* Enable RDTSCP support. */
2520
2521 if ((val & zap) != val)
2522 {
2523 LogRel(("hmR0VmxSetupProcCtls: invalid secondary processor-based VM-execution controls combo! "
2524 "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0, val, zap));
2525 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
2526 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2527 }
2528
2529 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, val);
2530 AssertRCReturn(rc, rc);
2531
2532 /* Update VCPU with the currently set secondary processor-based VM-execution controls. */
2533 pVCpu->hm.s.vmx.u32ProcCtls2 = val;
2534 }
2535 else if (RT_UNLIKELY(pVM->hm.s.vmx.fUnrestrictedGuest))
2536 {
2537 LogRel(("hmR0VmxSetupProcCtls: Unrestricted Guest set as true when secondary processor-based VM-execution controls not "
2538 "available\n"));
2539 pVCpu->hm.s.u32HMError = VMX_UFC_INVALID_UX_COMBO;
2540 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2541 }
2542
2543 return VINF_SUCCESS;
2544}
2545
2546
2547/**
2548 * Sets up miscellaneous (everything other than Pin & Processor-based
2549 * VM-execution) control fields in the VMCS.
2550 *
2551 * @returns VBox status code.
2552 * @param pVM Pointer to the VM.
2553 * @param pVCpu Pointer to the VMCPU.
2554 */
2555static int hmR0VmxSetupMiscCtls(PVM pVM, PVMCPU pVCpu)
2556{
2557 NOREF(pVM);
2558 AssertPtr(pVM);
2559 AssertPtr(pVCpu);
2560
2561 int rc = VERR_GENERAL_FAILURE;
2562
2563 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
2564#if 0
2565 /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxLoadGuestCR3AndCR4())*/
2566 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0); AssertRCReturn(rc, rc);
2567 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0); AssertRCReturn(rc, rc);
2568
2569 /*
2570     * Set MASK & MATCH to 0. VMX checks if GuestPFErrCode & MASK == MATCH. If they are equal (in our case they always
2571     * are) and the X86_XCPT_PF bit in the exception bitmap is set, a #PF causes a VM-exit; if the bit is clear, it doesn't.
2572     * We thus use the exception bitmap alone to control this rather than using both.
2573 */
2574 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0); AssertRCReturn(rc, rc);
2575 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0); AssertRCReturn(rc, rc);
2576
2577 /** @todo Explore possibility of using IO-bitmaps. */
2578 /* All IO & IOIO instructions cause VM-exits. */
2579 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0); AssertRCReturn(rc, rc);
2580 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0); AssertRCReturn(rc, rc);
2581
2582 /* Initialize the MSR-bitmap area. */
2583 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0); AssertRCReturn(rc, rc);
2584 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0); AssertRCReturn(rc, rc);
2585 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0); AssertRCReturn(rc, rc);
2586#endif
2587
2588 /* Setup MSR auto-load/store area. */
2589 Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
2590 Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf)); /* Lower 4 bits MBZ. */
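    /* Note: the same area is used for both the VM-entry MSR-load and VM-exit MSR-store slots, so the
       guest MSR values saved by the CPU on VM-exit are in place for the next VM-entry. */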
2591 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
2592 AssertRCReturn(rc, rc);
2593 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
2594 AssertRCReturn(rc, rc);
2595
2596 Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
2597 Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf)); /* Lower 4 bits MBZ. */
2598 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
2599 AssertRCReturn(rc, rc);
2600
2601 /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
2602 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, UINT64_C(0xffffffffffffffff));
2603 AssertRCReturn(rc, rc);
2604
2605 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
2606#if 0
2607 /* Setup debug controls */
2608 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0); /** @todo We don't support IA32_DEBUGCTL MSR. Should we? */
2609 AssertRCReturn(rc, rc);
2610 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
2611 AssertRCReturn(rc, rc);
2612#endif
2613
2614 return rc;
2615}
2616
2617
2618/**
2619 * Sets up the initial exception bitmap in the VMCS based on static conditions
2620 * (i.e. conditions that cannot ever change after starting the VM).
2621 *
2622 * @returns VBox status code.
2623 * @param pVM Pointer to the VM.
2624 * @param pVCpu Pointer to the VMCPU.
2625 */
2626static int hmR0VmxInitXcptBitmap(PVM pVM, PVMCPU pVCpu)
2627{
2628 AssertPtr(pVM);
2629 AssertPtr(pVCpu);
2630
2631 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
2632
2633 uint32_t u32XcptBitmap = 0;
2634
2635 /* Without Nested Paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
2636 if (!pVM->hm.s.fNestedPaging)
2637 u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
2638
2639 pVCpu->hm.s.vmx.u32XcptBitmap = u32XcptBitmap;
2640 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
2641 AssertRCReturn(rc, rc);
2642 return rc;
2643}
2644
2645
2646/**
2647 * Sets up the initial guest-state mask. The guest-state mask is consulted
2648 * before reading guest-state fields from the VMCS as VMREADs can be expensive
2649 * for the nested virtualization case (as it would cause a VM-exit).
2650 * @returns VBox status code.
2651 * @param pVCpu Pointer to the VMCPU.
2652 */
2653static int hmR0VmxInitUpdatedGuestStateMask(PVMCPU pVCpu)
2654{
2655 /* Initially the guest-state is up-to-date as there is nothing in the VMCS. */
2656 HMVMXCPU_GST_RESET_TO(pVCpu, HMVMX_UPDATED_GUEST_ALL);
2657 return VINF_SUCCESS;
2658}
2659
2660
2661/**
2662 * Does per-VM VT-x initialization.
2663 *
2664 * @returns VBox status code.
2665 * @param pVM Pointer to the VM.
2666 */
2667VMMR0DECL(int) VMXR0InitVM(PVM pVM)
2668{
2669 LogFlowFunc(("pVM=%p\n", pVM));
2670
2671 int rc = hmR0VmxStructsAlloc(pVM);
2672 if (RT_FAILURE(rc))
2673 {
2674 LogRel(("VMXR0InitVM: hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
2675 return rc;
2676 }
2677
2678 return VINF_SUCCESS;
2679}
2680
2681
2682/**
2683 * Does per-VM VT-x termination.
2684 *
2685 * @returns VBox status code.
2686 * @param pVM Pointer to the VM.
2687 */
2688VMMR0DECL(int) VMXR0TermVM(PVM pVM)
2689{
2690 LogFlowFunc(("pVM=%p\n", pVM));
2691
2692#ifdef VBOX_WITH_CRASHDUMP_MAGIC
2693 if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ)
2694 ASMMemZero32(pVM->hm.s.vmx.pvScratch, PAGE_SIZE);
2695#endif
2696 hmR0VmxStructsFree(pVM);
2697 return VINF_SUCCESS;
2698}
2699
2700
2701/**
2702 * Sets up the VM for execution under VT-x.
2703 * This function is only called once per-VM during initialization.
2704 *
2705 * @returns VBox status code.
2706 * @param pVM Pointer to the VM.
2707 */
2708VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
2709{
2710 AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
2711 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2712
2713 LogFlowFunc(("pVM=%p\n", pVM));
2714
2715 /*
2716 * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be allocated.
2717 * We no longer support the highly unlikely case of UnrestrictedGuest without pRealModeTSS. See hmR3InitFinalizeR0().
2718 */
2719 /* -XXX- change hmR3InitFinalizeR0Intel() to fail if pRealModeTSS alloc fails. */
2720 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
2721 && ( !pVM->hm.s.vmx.pNonPagingModeEPTPageTable
2722 || !pVM->hm.s.vmx.pRealModeTSS))
2723 {
2724 LogRel(("VMXR0SetupVM: invalid real-on-v86 state.\n"));
2725 return VERR_INTERNAL_ERROR;
2726 }
2727
2728#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2729 /*
2730 * This is for the darwin 32-bit/PAE kernels trying to execute 64-bit guests. We don't bother with
2731 * the 32<->64 switcher in this case. This is a rare, legacy use-case with barely any test coverage.
2732 */
2733 if ( pVM->hm.s.fAllow64BitGuests
2734 && !HMVMX_IS_64BIT_HOST_MODE())
2735 {
2736 LogRel(("VMXR0SetupVM: Unsupported guest and host paging mode combination.\n"));
2737 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
2738 }
2739#endif
2740
2741 /* Initialize these always, see hmR3InitFinalizeR0().*/
2742 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NONE;
2743 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NONE;
2744
2745 /* Setup the tagged-TLB flush handlers. */
2746 int rc = hmR0VmxSetupTaggedTlb(pVM);
2747 if (RT_FAILURE(rc))
2748 {
2749 LogRel(("VMXR0SetupVM: hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
2750 return rc;
2751 }
2752
2753 /* Check if we can use the VMCS controls for swapping the EFER MSR. */
2754 Assert(!pVM->hm.s.vmx.fSupportsVmcsEfer);
2755#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
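    /* All three controls are required: load guest EFER on VM-entry, and save guest EFER plus
       load host EFER on VM-exit. */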
2756 if ( HMVMX_IS_64BIT_HOST_MODE()
2757 && (pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1 & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
2758 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
2759 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR))
2760 {
2761 pVM->hm.s.vmx.fSupportsVmcsEfer = true;
2762 }
2763#endif
2764
2765 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2766 {
2767 PVMCPU pVCpu = &pVM->aCpus[i];
2768 AssertPtr(pVCpu);
2769 AssertPtr(pVCpu->hm.s.vmx.pvVmcs);
2770
2771 /* Log the VCPU pointers, useful for debugging SMP VMs. */
2772 Log4(("VMXR0SetupVM: pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
2773
2774 /* Initialize the VM-exit history array with end-of-array markers (UINT16_MAX). */
2775 Assert(!pVCpu->hm.s.idxExitHistoryFree);
2776 HMCPU_EXIT_HISTORY_RESET(pVCpu);
2777
2778 /* Set revision dword at the beginning of the VMCS structure. */
2779 *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.Msrs.u64BasicInfo);
2780
2781 /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */
2782 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2783 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2784 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2785
2786 /* Load this VMCS as the current VMCS. */
2787 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2788 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2789 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2790
2791 rc = hmR0VmxSetupPinCtls(pVM, pVCpu);
2792 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2793 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2794
2795 rc = hmR0VmxSetupProcCtls(pVM, pVCpu);
2796 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2797 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2798
2799 rc = hmR0VmxSetupMiscCtls(pVM, pVCpu);
2800 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2801 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2802
2803 rc = hmR0VmxInitXcptBitmap(pVM, pVCpu);
2804 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2805 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2806
2807 rc = hmR0VmxInitUpdatedGuestStateMask(pVCpu);
2808 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitUpdatedGuestStateMask failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2809 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2810
2811#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2812 rc = hmR0VmxInitVmcsReadCache(pVM, pVCpu);
2813 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2814 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2815#endif
2816
2817 /* Re-sync the CPU's internal data into our VMCS memory region & reset the launch state to "clear". */
2818 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2819 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs(2) failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2820 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2821
2822 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
2823
2824 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc);
2825 }
2826
2827 return VINF_SUCCESS;
2828}
2829
2830
2831/**
2832 * Saves the host control registers (CR0, CR3, CR4) into the host-state area in
2833 * the VMCS.
2834 *
2835 * @returns VBox status code.
2836 * @param pVM Pointer to the VM.
2837 * @param pVCpu Pointer to the VMCPU.
2838 */
2839DECLINLINE(int) hmR0VmxSaveHostControlRegs(PVM pVM, PVMCPU pVCpu)
2840{
2841 NOREF(pVM); NOREF(pVCpu);
2842
2843 RTCCUINTREG uReg = ASMGetCR0();
2844 int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
2845 AssertRCReturn(rc, rc);
2846
2847#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2848 /* For the darwin 32-bit hybrid kernel, we need the 64-bit CR3 as it uses 64-bit paging. */
2849 if (HMVMX_IS_64BIT_HOST_MODE())
2850 {
2851 uint64_t uRegCR3 = HMR0Get64bitCR3();
2852 rc = VMXWriteVmcs64(VMX_VMCS_HOST_CR3, uRegCR3);
2853 }
2854 else
2855#endif
2856 {
2857 uReg = ASMGetCR3();
2858 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
2859 }
2860 AssertRCReturn(rc, rc);
2861
2862 uReg = ASMGetCR4();
2863 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
2864 AssertRCReturn(rc, rc);
2865 return rc;
2866}
2867
2868
2869#if HC_ARCH_BITS == 64
2870/**
2871 * Macro for adjusting host segment selectors to satisfy VT-x's VM-entry
2872 * requirements. See hmR0VmxSaveHostSegmentRegs().
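 *
 * VT-x requires the host DS, ES, FS and GS selectors to have RPL = 0 and to not reference the
 * LDT (see Intel spec. 26.2.3). An offending selector is loaded with zero for VM-entry; unless
 * it is a stale LDT selector (not present), it is also recorded so the original value can be
 * restored before returning to the host.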
2873 */
2874# define VMXLOCAL_ADJUST_HOST_SEG(seg, selValue) \
2875 if ((selValue) & (X86_SEL_RPL | X86_SEL_LDT)) \
2876 { \
2877 bool fValidSelector = true; \
2878 if ((selValue) & X86_SEL_LDT) \
2879 { \
2880 uint32_t uAttr = ASMGetSegAttr((selValue)); \
2881 fValidSelector = RT_BOOL(uAttr != UINT32_MAX && (uAttr & X86_DESC_P)); \
2882 } \
2883 if (fValidSelector) \
2884 { \
2885 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_##seg; \
2886 pVCpu->hm.s.vmx.RestoreHost.uHostSel##seg = (selValue); \
2887 } \
2888 (selValue) = 0; \
2889 }
2890#endif
2891
2892
2893/**
2894 * Saves the host segment registers and GDTR, IDTR, (TR, GS and FS bases) into
2895 * the host-state area in the VMCS.
2896 *
2897 * @returns VBox status code.
2898 * @param pVM Pointer to the VM.
2899 * @param pVCpu Pointer to the VMCPU.
2900 */
2901DECLINLINE(int) hmR0VmxSaveHostSegmentRegs(PVM pVM, PVMCPU pVCpu)
2902{
2903 NOREF(pVM);
2904 int rc = VERR_INTERNAL_ERROR_5;
2905
2906#if HC_ARCH_BITS == 64
2907 /*
2908 * If we've executed guest code using VT-x, the host-state bits will be messed up. We
2909 * should -not- save the messed up state without restoring the original host-state. See @bugref{7240}.
2910 */
2911 AssertMsgReturn(!(pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED),
2912 ("Re-saving host-state after executing guest code without leaving VT-x!\n"), VERR_WRONG_ORDER);
2913#endif
2914
2915 /*
2916 * Host DS, ES, FS and GS segment registers.
2917 */
2918#if HC_ARCH_BITS == 64
2919 RTSEL uSelDS = ASMGetDS();
2920 RTSEL uSelES = ASMGetES();
2921 RTSEL uSelFS = ASMGetFS();
2922 RTSEL uSelGS = ASMGetGS();
2923#else
2924 RTSEL uSelDS = 0;
2925 RTSEL uSelES = 0;
2926 RTSEL uSelFS = 0;
2927 RTSEL uSelGS = 0;
2928#endif
2929
2930 /* Recalculate which host-state bits need to be manually restored. */
2931 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
2932
2933 /*
2934 * Host CS and SS segment registers.
2935 */
2936#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2937 RTSEL uSelCS;
2938 RTSEL uSelSS;
2939 if (HMVMX_IS_64BIT_HOST_MODE())
2940 {
2941 uSelCS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelCS;
2942 uSelSS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelSS;
2943 }
2944 else
2945 {
2946 /* Seems darwin uses the LDT (TI flag is set) in the CS & SS selectors which VT-x doesn't like. */
2947 uSelCS = (RTSEL)(uintptr_t)&SUPR0AbsKernelCS;
2948 uSelSS = (RTSEL)(uintptr_t)&SUPR0AbsKernelSS;
2949 }
2950#else
2951 RTSEL uSelCS = ASMGetCS();
2952 RTSEL uSelSS = ASMGetSS();
2953#endif
2954
2955 /*
2956 * Host TR segment register.
2957 */
2958 RTSEL uSelTR = ASMGetTR();
2959
2960#if HC_ARCH_BITS == 64
2961 /*
2962     * Determine if the host segment registers are suitable for VT-x; otherwise load them with zero so VM-entry succeeds,
2963     * and restore them before we get preempted. See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
2964 */
2965 VMXLOCAL_ADJUST_HOST_SEG(DS, uSelDS);
2966 VMXLOCAL_ADJUST_HOST_SEG(ES, uSelES);
2967 VMXLOCAL_ADJUST_HOST_SEG(FS, uSelFS);
2968 VMXLOCAL_ADJUST_HOST_SEG(GS, uSelGS);
2969# undef VMXLOCAL_ADJUST_HOST_SEG
2970#endif
2971
2972 /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
2973 Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
2974 Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));
2975 Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
2976 Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
2977 Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT));
2978 Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT));
2979 Assert(!(uSelTR & X86_SEL_RPL)); Assert(!(uSelTR & X86_SEL_LDT));
2980 Assert(uSelCS);
2981 Assert(uSelTR);
2982
2983 /* Assertion is right but we would not have updated u32ExitCtls yet. */
2984#if 0
2985 if (!(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE))
2986 Assert(uSelSS != 0);
2987#endif
2988
2989 /* Write these host selector fields into the host-state area in the VMCS. */
2990 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS); AssertRCReturn(rc, rc);
2991 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS); AssertRCReturn(rc, rc);
2992#if HC_ARCH_BITS == 64
2993 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS); AssertRCReturn(rc, rc);
2994 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES); AssertRCReturn(rc, rc);
2995 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_FS, uSelFS); AssertRCReturn(rc, rc);
2996 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_GS, uSelGS); AssertRCReturn(rc, rc);
2997#endif
2998 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_TR, uSelTR); AssertRCReturn(rc, rc);
2999
3000 /*
3001 * Host GDTR and IDTR.
3002 */
3003 RTGDTR Gdtr;
3004 RT_ZERO(Gdtr);
3005#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
3006 if (HMVMX_IS_64BIT_HOST_MODE())
3007 {
3008 X86XDTR64 Gdtr64;
3009 X86XDTR64 Idtr64;
3010 HMR0Get64bitGdtrAndIdtr(&Gdtr64, &Idtr64);
3011 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GDTR_BASE, Gdtr64.uAddr); AssertRCReturn(rc, rc);
3012 rc = VMXWriteVmcs64(VMX_VMCS_HOST_IDTR_BASE, Idtr64.uAddr); AssertRCReturn(rc, rc);
3013
3014 Gdtr.cbGdt = Gdtr64.cb;
3015 Gdtr.pGdt = (uintptr_t)Gdtr64.uAddr;
3016 }
3017 else
3018#endif
3019 {
3020 RTIDTR Idtr;
3021 ASMGetGDTR(&Gdtr);
3022 ASMGetIDTR(&Idtr);
3023 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt); AssertRCReturn(rc, rc);
3024 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt); AssertRCReturn(rc, rc);
3025
3026#if HC_ARCH_BITS == 64
3027 /*
3028 * Determine if we need to manually restore the GDTR and IDTR limits as VT-x zaps them to the
3029 * maximum limit (0xffff) on every VM-exit.
3030 */
3031 if (Gdtr.cbGdt != 0xffff)
3032 {
3033 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;
3034 AssertCompile(sizeof(Gdtr) == sizeof(X86XDTR64));
3035 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
3036 }
3037
3038 /*
3039 * IDT limit is effectively capped at 0xfff. (See Intel spec. 6.14.1 "64-Bit Mode IDT"
3040 * and Intel spec. 6.2 "Exception and Interrupt Vectors".) Therefore if the host has the limit as 0xfff, VT-x
3041 * bloating the limit to 0xffff shouldn't cause any different CPU behavior. However, several hosts either insist
3042 * on 0xfff being the limit (Windows Patch Guard) or use the limit for other purposes (darwin puts the CPU ID in there
3043 * but botches sidt alignment in at least one consumer). So, we're only allowing IDTR.LIMIT to be left at 0xffff on
3044 * hosts where we are pretty sure it won't cause trouble.
3045 */
3046# if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
3047 if (Idtr.cbIdt < 0x0fff)
3048# else
3049 if (Idtr.cbIdt != 0xffff)
3050# endif
3051 {
3052 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
3053 AssertCompile(sizeof(Idtr) == sizeof(X86XDTR64));
3054 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostIdtr, &Idtr, sizeof(X86XDTR64));
3055 }
3056#endif
3057 }
3058
3059 /*
3060 * Host TR base. Verify that TR selector doesn't point past the GDT. Masking off the TI and RPL bits
3061 * is effectively what the CPU does for "scaling by 8". TI is always 0 and RPL should be too in most cases.
3062 */
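         /* Illustration (hypothetical values, not from a real host): a TR selector of 0x0040 with Gdtr.cbGdt = 0x3f would be
            rejected below, since (0x0040 | 0x7) = 0x47 > 0x3f, i.e. the descriptor's last byte lies beyond the GDT limit. */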
3063 if ((uSelTR | X86_SEL_RPL_LDT) > Gdtr.cbGdt)
3064 {
3065 AssertMsgFailed(("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt));
3066 return VERR_VMX_INVALID_HOST_STATE;
3067 }
3068
3069 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
3070#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
3071 if (HMVMX_IS_64BIT_HOST_MODE())
3072 {
3073 /* We need the 64-bit TR base for hybrid darwin. */
3074 uint64_t u64TRBase = X86DESC64_BASE((PX86DESC64)pDesc);
3075 rc = VMXWriteVmcs64(VMX_VMCS_HOST_TR_BASE, u64TRBase);
3076 }
3077 else
3078#endif
3079 {
3080 uintptr_t uTRBase;
3081#if HC_ARCH_BITS == 64
3082 uTRBase = X86DESC64_BASE(pDesc);
3083
3084 /*
3085 * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on all VM-exits.
3086 * The type is the same for 64-bit busy TSS[1]. The limit needs manual restoration if the host has something else.
3087 * Task switching is not supported in 64-bit mode[2], but the limit still matters as IOPM is supported in 64-bit mode.
3088 * Restoring the limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0.
3089 *
3090 * [1] See Intel spec. 3.5 "System Descriptor Types".
3091 * [2] See Intel spec. 7.2.3 "TSS Descriptor in 64-bit mode".
3092 */
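             /* Note: 0x67 = 103 = size of the hardware TSS (104 bytes) minus one; the same reasoning requires u4LimitHigh to be zero. */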
3093 Assert(pDesc->System.u4Type == 11);
3094 if ( pDesc->System.u16LimitLow != 0x67
3095 || pDesc->System.u4LimitHigh)
3096 {
3097 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_TR;
3098 pVCpu->hm.s.vmx.RestoreHost.uHostSelTR = uSelTR;
3099
3100 /* Store the GDTR here as we need it while restoring TR. */
3101 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
3102 }
3103#else
3104 uTRBase = X86DESC_BASE(pDesc);
3105#endif
3106 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
3107 }
3108 AssertRCReturn(rc, rc);
3109
3110 /*
3111 * Host FS base and GS base.
3112 */
3113#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3114 if (HMVMX_IS_64BIT_HOST_MODE())
3115 {
3116 uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
3117 uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
3118 rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase); AssertRCReturn(rc, rc);
3119 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase); AssertRCReturn(rc, rc);
3120
3121# if HC_ARCH_BITS == 64
3122 /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */
3123 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS)
3124 pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = u64FSBase;
3125 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_GS)
3126 pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase;
3127# endif
3128 }
3129#endif
3130 return rc;
3131}
3132
3133
3134/**
3135 * Saves certain host MSRs in the VM-Exit MSR-load area and some in the
3136 * host-state area of the VMCS. These MSRs will be automatically restored on
3137 * the host after every successful VM-exit.
3138 *
3139 * @returns VBox status code.
3140 * @param pVM Pointer to the VM.
3141 * @param pVCpu Pointer to the VMCPU.
3142 *
3143 * @remarks No-long-jump zone!!!
3144 */
3145DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu)
3146{
3147 NOREF(pVM);
3148
3149 AssertPtr(pVCpu);
3150 AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
3151
3152 int rc = VINF_SUCCESS;
3153#if HC_ARCH_BITS == 64
3154 if (pVM->hm.s.fAllow64BitGuests)
3155 hmR0VmxLazySaveHostMsrs(pVCpu);
3156#endif
3157
3158 /*
3159 * Host Sysenter MSRs.
3160 */
3161 rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
3162 AssertRCReturn(rc, rc);
3163#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
3164 if (HMVMX_IS_64BIT_HOST_MODE())
3165 {
3166 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
3167 AssertRCReturn(rc, rc);
3168 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
3169 }
3170 else
3171 {
3172 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
3173 AssertRCReturn(rc, rc);
3174 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
3175 }
3176#elif HC_ARCH_BITS == 32
3177 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
3178 AssertRCReturn(rc, rc);
3179 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
3180#else
3181 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
3182 AssertRCReturn(rc, rc);
3183 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
3184#endif
3185 AssertRCReturn(rc, rc);
3186
3187 /*
3188 * Host EFER MSR.
3189 * If the CPU supports the newer VMCS controls for managing EFER, use it.
3190 * Otherwise it's done as part of auto-load/store MSR area in the VMCS, see hmR0VmxLoadGuestMsrs().
3191 */
3192 if (pVM->hm.s.vmx.fSupportsVmcsEfer)
3193 {
3194 rc = VMXWriteVmcs64(VMX_VMCS64_HOST_FIELD_EFER_FULL, pVM->hm.s.vmx.u64HostEfer);
3195 AssertRCReturn(rc, rc);
3196 }
3197
3198 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see
3199 * hmR0VmxLoadGuestExitCtls() !! */
3200
3201 return rc;
3202}
3203
3204
3205/**
3206 * Figures out if we need to swap the EFER MSR which is
3207 * particularly expensive.
3208 *
3209 * We check all relevant bits. For now, that's everything
3210 * besides LMA/LME, as these two bits are handled by VM-entry,
3211 * see hmR0VmxLoadGuestExitCtls() and
3212 * hmR0VmxLoadGuestEntryCtls().
3213 *
3214 * @returns true if we need to load guest EFER, false otherwise.
3215 * @param pVCpu Pointer to the VMCPU.
3216 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3217 * out-of-sync. Make sure to update the required fields
3218 * before using them.
3219 *
3220 * @remarks Requires EFER, CR4.
3221 * @remarks No-long-jump zone!!!
3222 */
3223static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3224{
3225#ifdef HMVMX_ALWAYS_SWAP_EFER
3226 return true;
3227#endif
3228
3229#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3230 /* For 32-bit hosts running 64-bit guests, we always swap EFER in the world-switcher. Nothing to do here. */
3231 if (CPUMIsGuestInLongMode(pVCpu))
3232 return false;
3233#endif
3234
3235 PVM pVM = pVCpu->CTX_SUFF(pVM);
3236 uint64_t u64HostEfer = pVM->hm.s.vmx.u64HostEfer;
3237 uint64_t u64GuestEfer = pMixedCtx->msrEFER;
3238
3239 /*
3240 * For 64-bit guests, if EFER.SCE bit differs, we need to swap to ensure that the
3241 * guest's SYSCALL behaviour isn't screwed. See @bugref{7386}.
3242 */
3243 if ( CPUMIsGuestInLongMode(pVCpu)
3244 && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE))
3245 {
3246 return true;
3247 }
3248
3249 /*
3250 * If the guest uses PAE and the EFER.NXE bit differs, we need to swap EFER as it
3251 * affects guest paging. 64-bit paging implies CR4.PAE as well.
3252 * See Intel spec. 4.5 "IA-32e Paging" and Intel spec. 4.1.1 "Three Paging Modes".
3253 */
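     /* Illustrative scenario (hypothetical): host EFER.NXE=1 while a PAE guest runs with EFER.NXE=0. Without swapping, a guest
        PTE with bit 63 set would act as no-execute instead of raising a reserved-bit #PF, so the EFER values must be swapped. */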
3254 if ( (pMixedCtx->cr4 & X86_CR4_PAE)
3255 && (pMixedCtx->cr0 & X86_CR0_PG)
3256 && (u64GuestEfer & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE))
3257 {
3258 /* Assert that host is PAE capable. */
3259 Assert(pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_NX);
3260 return true;
3261 }
3262
3263 /** @todo Check the latest Intel spec. for any other bits,
3264 * like SMEP/SMAP? */
3265 return false;
3266}
3267
3268
3269/**
3270 * Sets up VM-entry controls in the VMCS. These controls can affect things done
3271 * on VM-exit; e.g. "load debug controls", see Intel spec. 24.8.1 "VM-entry
3272 * controls".
3273 *
3274 * @returns VBox status code.
3275 * @param pVCpu Pointer to the VMCPU.
3276 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3277 * out-of-sync. Make sure to update the required fields
3278 * before using them.
3279 *
3280 * @remarks Requires EFER.
3281 * @remarks No-long-jump zone!!!
3282 */
3283DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3284{
3285 int rc = VINF_SUCCESS;
3286 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS))
3287 {
3288 PVM pVM = pVCpu->CTX_SUFF(pVM);
3289 uint32_t val = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0; /* Bits set here must be set in the VMCS. */
3290 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
3291
3292        /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supported the 1-setting of this bit. */
3293 val |= VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG;
3294
3295 /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
3296 if (CPUMIsGuestInLongModeEx(pMixedCtx))
3297 {
3298 val |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;
3299 Log4(("Load[%RU32]: VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST\n", pVCpu->idCpu));
3300 }
3301 else
3302 Assert(!(val & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST));
3303
3304 /* If the CPU supports the newer VMCS controls for managing guest/host EFER, use it. */
3305 if ( pVM->hm.s.vmx.fSupportsVmcsEfer
3306 && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
3307 {
3308 val |= VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR;
3309 Log4(("Load[%RU32]: VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR\n", pVCpu->idCpu));
3310 }
3311
3312 /*
3313 * The following should -not- be set (since we're not in SMM mode):
3314 * - VMX_VMCS_CTRL_ENTRY_ENTRY_SMM
3315 * - VMX_VMCS_CTRL_ENTRY_DEACTIVATE_DUALMON
3316 */
3317
3318 /** @todo VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR,
3319 * VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR. */
3320
3321 if ((val & zap) != val)
3322 {
3323 LogRel(("hmR0VmxLoadGuestEntryCtls: invalid VM-entry controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
3324 pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0, val, zap));
3325 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;
3326 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3327 }
3328
3329 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, val);
3330 AssertRCReturn(rc, rc);
3331
3332        /* Update VCPU with the currently set VM-entry controls. */
3333 pVCpu->hm.s.vmx.u32EntryCtls = val;
3334 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS);
3335 }
3336 return rc;
3337}
3338
3339
3340/**
3341 * Sets up the VM-exit controls in the VMCS.
3342 *
3343 * @returns VBox status code.
3345 * @param pVCpu Pointer to the VMCPU.
3346 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3347 * out-of-sync. Make sure to update the required fields
3348 * before using them.
3349 *
3350 * @remarks Requires EFER.
3351 */
3352DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3353{
3354 NOREF(pMixedCtx);
3355
3356 int rc = VINF_SUCCESS;
3357 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_EXIT_CTLS))
3358 {
3359 PVM pVM = pVCpu->CTX_SUFF(pVM);
3360 uint32_t val = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0; /* Bits set here must be set in the VMCS. */
3361 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
3362
3363 /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
3364 val |= VMX_VMCS_CTRL_EXIT_SAVE_DEBUG;
3365
3366 /*
3367 * Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary.
3368         * On VM-exit, VT-x sets both the host EFER.LMA and EFER.LME bits to this value. See assertion in hmR0VmxSaveHostMsrs().
3369 */
3370#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3371 if (HMVMX_IS_64BIT_HOST_MODE())
3372 {
3373 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
3374 Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n", pVCpu->idCpu));
3375 }
3376 else
3377 Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
3378#else
3379 if (CPUMIsGuestInLongModeEx(pMixedCtx))
3380 {
3381 /* The switcher returns to long mode, EFER is managed by the switcher. */
3382 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
3383 Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n", pVCpu->idCpu));
3384 }
3385 else
3386 Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
3387#endif /* HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
3388
3389        /* If the newer VMCS fields for managing EFER exist, use them. */
3390 if ( pVM->hm.s.vmx.fSupportsVmcsEfer
3391 && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
3392 {
3393 val |= VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR
3394 | VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR;
3395 Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR, VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR\n", pVCpu->idCpu));
3396 }
3397
3398 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
3399 Assert(!(val & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT));
3400
3401 /** @todo VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR,
3402 * VMX_VMCS_CTRL_EXIT_SAVE_GUEST_PAT_MSR,
3403 * VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR. */
3404
3405 if (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER)
3406 val |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER;
3407
3408 if ((val & zap) != val)
3409 {
3410            LogRel(("hmR0VmxLoadGuestExitCtls: invalid VM-exit controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
3411 pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0, val, zap));
3412 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
3413 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3414 }
3415
3416 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, val);
3417 AssertRCReturn(rc, rc);
3418
3419 /* Update VCPU with the currently set VM-exit controls. */
3420 pVCpu->hm.s.vmx.u32ExitCtls = val;
3421 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_EXIT_CTLS);
3422 }
3423 return rc;
3424}
3425
3426
3427/**
3428 * Loads the guest APIC and related state.
3429 *
3430 * @returns VBox status code.
3432 * @param pVCpu Pointer to the VMCPU.
3433 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3434 * out-of-sync. Make sure to update the required fields
3435 * before using them.
3436 */
3437DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3438{
3439 NOREF(pMixedCtx);
3440
3441 int rc = VINF_SUCCESS;
3442 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE))
3443 {
3444 /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
3445 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
3446 {
3447 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
3448
3449 bool fPendingIntr = false;
3450 uint8_t u8Tpr = 0;
3451 uint8_t u8PendingIntr = 0;
3452 rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
3453 AssertRCReturn(rc, rc);
3454
3455 /*
3456 * If there are external interrupts pending but masked by the TPR value, instruct VT-x to cause a VM-exit when
3457 * the guest lowers its TPR below the highest-priority pending interrupt and we can deliver the interrupt.
3458 * If there are no external interrupts pending, set threshold to 0 to not cause a VM-exit. We will eventually deliver
3459 * the interrupt when we VM-exit for other reasons.
3460 */
3461 pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8Tpr; /* Offset 0x80 is TPR in the APIC MMIO range. */
3462 uint32_t u32TprThreshold = 0;
3463 if (fPendingIntr)
3464 {
3465 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR (which is the Task-Priority Class). */
3466 const uint8_t u8PendingPriority = (u8PendingIntr >> 4) & 0xf;
3467 const uint8_t u8TprPriority = (u8Tpr >> 4) & 0xf;
3468 if (u8PendingPriority <= u8TprPriority)
3469 u32TprThreshold = u8PendingPriority;
3470 else
3471 u32TprThreshold = u8TprPriority; /* Required for Vista 64-bit guest, see @bugref{6398}. */
3472 }
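             /* Worked example (hypothetical values): u8Tpr=0x50 (class 5) and highest pending vector u8PendingIntr=0x31 (class 3).
                As 3 <= 5 the interrupt is masked and u32TprThreshold becomes 3, so a guest write lowering the TPR below 0x30
                causes a TPR-below-threshold VM-exit, letting us deliver the pending interrupt. */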
3473 Assert(!(u32TprThreshold & 0xfffffff0)); /* Bits 31:4 MBZ. */
3474
3475 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
3476 AssertRCReturn(rc, rc);
3477 }
3478
3479 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
3480 }
3481 return rc;
3482}
3483
3484
3485/**
3486 * Gets the guest's interruptibility-state ("interrupt shadow" as AMD calls it).
3487 *
3488 * @returns Guest's interruptibility-state.
3489 * @param pVCpu Pointer to the VMCPU.
3490 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3491 * out-of-sync. Make sure to update the required fields
3492 * before using them.
3493 *
3494 * @remarks No-long-jump zone!!!
3495 */
3496DECLINLINE(uint32_t) hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3497{
3498 /*
3499 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
3500 */
3501 uint32_t uIntrState = 0;
3502 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3503 {
3504 /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). */
3505 AssertMsg(HMVMXCPU_GST_IS_SET(pVCpu, HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS),
3506 ("%#x\n", HMVMXCPU_GST_VALUE(pVCpu)));
3507 if (pMixedCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
3508 {
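             /* If IF is set, the inhibition is assumed to stem from an STI (which sets IF and inhibits interrupts for one
                instruction); otherwise it is attributed to a MOV SS/POP SS shadow. */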
3509 if (pMixedCtx->eflags.Bits.u1IF)
3510 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
3511 else
3512 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
3513 }
3514        /* else: Although we can clear the force-flag here, let's keep this side-effect free. */
3515 }
3516
3517 /*
3518 * NMIs to the guest are blocked after an NMI is injected until the guest executes an IRET. We only
3519 * bother with virtual-NMI blocking when we have support for virtual NMIs in the CPU, otherwise
3520 * setting this would block host-NMIs and IRET will not clear the blocking.
3521 *
3522 * See Intel spec. 26.6.1 "Interruptibility state". See @bugref{7445}.
3523 */
3524 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)
3525 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI))
3526 {
3527 uIntrState |= VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI;
3528 }
3529
3530 return uIntrState;
3531}
3532
3533
3534/**
3535 * Loads the guest's interruptibility-state into the guest-state area in the
3536 * VMCS.
3537 *
3538 * @returns VBox status code.
3539 * @param pVCpu Pointer to the VMCPU.
3540 * @param uIntrState The interruptibility-state to set.
3541 */
3542static int hmR0VmxLoadGuestIntrState(PVMCPU pVCpu, uint32_t uIntrState)
3543{
3544 NOREF(pVCpu);
3545 AssertMsg(!(uIntrState & 0xfffffff0), ("%#x\n", uIntrState)); /* Bits 31:4 MBZ. */
3546 Assert((uIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */
3547 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);
3548 AssertRCReturn(rc, rc);
3549 return rc;
3550}
3551
3552
3553/**
3554 * Loads the guest's RIP into the guest-state area in the VMCS.
3555 *
3556 * @returns VBox status code.
3557 * @param pVCpu Pointer to the VMCPU.
3558 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3559 * out-of-sync. Make sure to update the required fields
3560 * before using them.
3561 *
3562 * @remarks No-long-jump zone!!!
3563 */
3564static int hmR0VmxLoadGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3565{
3566 int rc = VINF_SUCCESS;
3567 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))
3568 {
3569 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
3570 AssertRCReturn(rc, rc);
3571
3572 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP);
3573 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RIP=%#RX64 fContextUseFlags=%#RX32\n", pVCpu->idCpu, pMixedCtx->rip,
3574 HMCPU_CF_VALUE(pVCpu)));
3575 }
3576 return rc;
3577}
3578
3579
3580/**
3581 * Loads the guest's RSP into the guest-state area in the VMCS.
3582 *
3583 * @returns VBox status code.
3584 * @param pVCpu Pointer to the VMCPU.
3585 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3586 * out-of-sync. Make sure to update the required fields
3587 * before using them.
3588 *
3589 * @remarks No-long-jump zone!!!
3590 */
3591static int hmR0VmxLoadGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3592{
3593 int rc = VINF_SUCCESS;
3594 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RSP))
3595 {
3596 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
3597 AssertRCReturn(rc, rc);
3598
3599 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RSP);
3600 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RSP=%#RX64\n", pVCpu->idCpu, pMixedCtx->rsp));
3601 }
3602 return rc;
3603}
3604
3605
3606/**
3607 * Loads the guest's RFLAGS into the guest-state area in the VMCS.
3608 *
3609 * @returns VBox status code.
3610 * @param pVCpu Pointer to the VMCPU.
3611 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3612 * out-of-sync. Make sure to update the required fields
3613 * before using them.
3614 *
3615 * @remarks No-long-jump zone!!!
3616 */
3617static int hmR0VmxLoadGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3618{
3619 int rc = VINF_SUCCESS;
3620 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
3621 {
3622 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
3623 Let us assert it as such and use 32-bit VMWRITE. */
3624 Assert(!(pMixedCtx->rflags.u64 >> 32));
3625 X86EFLAGS Eflags = pMixedCtx->eflags;
3626 Eflags.u32 &= VMX_EFLAGS_RESERVED_0; /* Bits 22-31, 15, 5 & 3 MBZ. */
3627 Eflags.u32 |= VMX_EFLAGS_RESERVED_1; /* Bit 1 MB1. */
3628
3629 /*
3630 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so we can restore them on VM-exit.
3631 * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under Virtual 8086 mode.
3632 */
3633 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3634 {
3635 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
3636 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
3637 pVCpu->hm.s.vmx.RealMode.Eflags.u32 = Eflags.u32; /* Save the original eflags of the real-mode guest. */
3638 Eflags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
3639 Eflags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
3640 }
3641
3642 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, Eflags.u32);
3643 AssertRCReturn(rc, rc);
3644
3645 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RFLAGS);
3646 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", pVCpu->idCpu, Eflags.u32));
3647 }
3648 return rc;
3649}
3650
3651
3652/**
3653 * Loads the guest RIP, RSP and RFLAGS into the guest-state area in the VMCS.
3654 *
3655 * @returns VBox status code.
3656 * @param pVCpu Pointer to the VMCPU.
3657 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3658 * out-of-sync. Make sure to update the required fields
3659 * before using them.
3660 *
3661 * @remarks No-long-jump zone!!!
3662 */
3663DECLINLINE(int) hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3664{
3665 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
3666 AssertRCReturn(rc, rc);
3667 rc = hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
3668 AssertRCReturn(rc, rc);
3669 rc = hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
3670 AssertRCReturn(rc, rc);
3671 return rc;
3672}
3673
3674
3675/**
3676 * Loads the guest CR0 control register into the guest-state area in the VMCS.
3677 * CR0 is partially shared with the host and we have to consider the FPU bits.
3678 *
3679 * @returns VBox status code.
3681 * @param pVCpu Pointer to the VMCPU.
3682 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3683 * out-of-sync. Make sure to update the required fields
3684 * before using them.
3685 *
3686 * @remarks No-long-jump zone!!!
3687 */
3688static int hmR0VmxLoadSharedCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3689{
3690 /*
3691 * Guest CR0.
3692 * Guest FPU.
3693 */
3694 int rc = VINF_SUCCESS;
3695 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
3696 {
3697 Assert(!(pMixedCtx->cr0 >> 32));
3698 uint32_t u32GuestCR0 = pMixedCtx->cr0;
3699 PVM pVM = pVCpu->CTX_SUFF(pVM);
3700
3701 /* The guest's view (read access) of its CR0 is unblemished. */
3702 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32GuestCR0);
3703 AssertRCReturn(rc, rc);
3704 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX32\n", pVCpu->idCpu, u32GuestCR0));
3705
3706 /* Setup VT-x's view of the guest CR0. */
3707 /* Minimize VM-exits due to CR3 changes when we have NestedPaging. */
3708 if (pVM->hm.s.fNestedPaging)
3709 {
3710 if (CPUMIsGuestPagingEnabledEx(pMixedCtx))
3711 {
3712 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
3713 pVCpu->hm.s.vmx.u32ProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
3714 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT);
3715 }
3716 else
3717 {
3718 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
3719 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
3720 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
3721 }
3722
3723 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
3724 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3725 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
3726
3727 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
3728 AssertRCReturn(rc, rc);
3729 }
3730 else
3731 u32GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
3732
3733 /*
3734 * Guest FPU bits.
3735         * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be set on the first
3736         * CPUs to support VT-x; it makes no mention of any relaxation with regards to UX in the VM-entry checks.
3737 */
3738 u32GuestCR0 |= X86_CR0_NE;
3739 bool fInterceptNM = false;
3740 if (CPUMIsGuestFPUStateActive(pVCpu))
3741 {
3742 fInterceptNM = false; /* Guest FPU active, no need to VM-exit on #NM. */
3743            /* The guest should still get #NM exceptions when it expects them, so we should not clear the TS & MP bits here.
3744 We're only concerned about -us- not intercepting #NMs when the guest-FPU is active. Not the guest itself! */
3745 }
3746 else
3747 {
3748 fInterceptNM = true; /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
3749 u32GuestCR0 |= X86_CR0_TS /* Guest can task switch quickly and do lazy FPU syncing. */
3750 | X86_CR0_MP; /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
3751 }
3752
3753 /* Catch floating point exceptions if we need to report them to the guest in a different way. */
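         /* With CR0.NE clear, the guest expects x87 errors via the legacy FERR#/IRQ13 mechanism rather than #MF, so #MF has to
            be intercepted and reflected to the guest in the form it expects. */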
3754 bool fInterceptMF = false;
3755 if (!(pMixedCtx->cr0 & X86_CR0_NE))
3756 fInterceptMF = true;
3757
3758 /* Finally, intercept all exceptions as we cannot directly inject them in real-mode, see hmR0VmxInjectEventVmcs(). */
3759 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3760 {
3761 Assert(PDMVmmDevHeapIsEnabled(pVM));
3762 Assert(pVM->hm.s.vmx.pRealModeTSS);
3763 pVCpu->hm.s.vmx.u32XcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
3764 fInterceptNM = true;
3765 fInterceptMF = true;
3766 }
3767 else
3768 pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
3769
3770 if (fInterceptNM)
3771 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_NM);
3772 else
3773 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_NM);
3774
3775 if (fInterceptMF)
3776 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_MF);
3777 else
3778 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_MF);
3779
3780 /* Additional intercepts for debugging, define these yourself explicitly. */
3781#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3782 pVCpu->hm.s.vmx.u32XcptBitmap |= 0
3783 | RT_BIT(X86_XCPT_BP)
3784 | RT_BIT(X86_XCPT_DB)
3785 | RT_BIT(X86_XCPT_DE)
3786 | RT_BIT(X86_XCPT_NM)
3787 | RT_BIT(X86_XCPT_TS)
3788 | RT_BIT(X86_XCPT_UD)
3789 | RT_BIT(X86_XCPT_NP)
3790 | RT_BIT(X86_XCPT_SS)
3791 | RT_BIT(X86_XCPT_GP)
3792 | RT_BIT(X86_XCPT_PF)
3793 | RT_BIT(X86_XCPT_MF)
3794 ;
3795#elif defined(HMVMX_ALWAYS_TRAP_PF)
3796 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
3797#endif
3798
3799 Assert(pVM->hm.s.fNestedPaging || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_PF)));
3800
3801 /* Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). */
3802 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
3803 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
3804 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
3805 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
3806 else
3807 Assert((uSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
3808
3809 u32GuestCR0 |= uSetCR0;
3810 u32GuestCR0 &= uZapCR0;
3811 u32GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */
3812
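         /* Illustration with typical (but not guaranteed) MSR values: IA32_VMX_CR0_FIXED0=0x80000021 and IA32_VMX_CR0_FIXED1=0xFFFFFFFF
            yield uSetCR0=0x80000021 (PG, NE and PE forced to 1) and uZapCR0=0xFFFFFFFF (no bits forced to 0); unrestricted guests
            then drop PE and PG from uSetCR0 as done above. */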
3813 /* Write VT-x's view of the guest CR0 into the VMCS and update the exception bitmap. */
3814 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCR0);
3815 AssertRCReturn(rc, rc);
3816 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
3817 AssertRCReturn(rc, rc);
3818 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", pVCpu->idCpu, u32GuestCR0, uSetCR0,
3819 uZapCR0));
3820
3821 /*
3822 * CR0 is shared between host and guest along with a CR0 read shadow. Therefore, certain bits must not be changed
3823 * by the guest because VT-x ignores saving/restoring them (namely CD, ET, NW) and for certain other bits
3824 * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables).
3825 */
3826 uint32_t u32CR0Mask = 0;
3827 u32CR0Mask = X86_CR0_PE
3828 | X86_CR0_NE
3829 | X86_CR0_WP
3830 | X86_CR0_PG
3831 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
3832 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
3833 | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
3834
3835 /** @todo Avoid intercepting CR0.PE with unrestricted guests. Fix PGM
3836 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
3837 * and @bugref{6944}. */
3838#if 0
3839 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3840 u32CR0Mask &= ~X86_CR0_PE;
3841#endif
3842 if (pVM->hm.s.fNestedPaging)
3843 u32CR0Mask &= ~X86_CR0_WP;
3844
3845        /* If the guest FPU state is active, we don't need to VM-exit on writes to the FPU-related bits in CR0. */
3846 if (fInterceptNM)
3847 {
3848 u32CR0Mask |= X86_CR0_TS
3849 | X86_CR0_MP;
3850 }
3851
3852 /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */
3853 pVCpu->hm.s.vmx.u32CR0Mask = u32CR0Mask;
3854 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32CR0Mask);
3855 AssertRCReturn(rc, rc);
3856 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR0_MASK=%#RX32\n", pVCpu->idCpu, u32CR0Mask));
3857
3858 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0);
3859 }
3860 return rc;
3861}
3862
3863
3864/**
3865 * Loads the guest control registers (CR3, CR4) into the guest-state area
3866 * in the VMCS.
3867 *
3868 * @returns VBox status code.
3870 * @param pVCpu Pointer to the VMCPU.
3871 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3872 * out-of-sync. Make sure to update the required fields
3873 * before using them.
3874 *
3875 * @remarks No-long-jump zone!!!
3876 */
3877static int hmR0VmxLoadGuestCR3AndCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3878{
3879 int rc = VINF_SUCCESS;
3880 PVM pVM = pVCpu->CTX_SUFF(pVM);
3881
3882 /*
3883 * Guest CR2.
3884 * It's always loaded in the assembler code. Nothing to do here.
3885 */
3886
3887 /*
3888 * Guest CR3.
3889 */
3890 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3))
3891 {
3892 RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
3893 if (pVM->hm.s.fNestedPaging)
3894 {
3895 pVCpu->hm.s.vmx.HCPhysEPTP = PGMGetHyperCR3(pVCpu);
3896
3897 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
3898 Assert(pVCpu->hm.s.vmx.HCPhysEPTP);
3899 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & UINT64_C(0xfff0000000000000)));
3900 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & 0xfff));
3901
3902 /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
3903 pVCpu->hm.s.vmx.HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
3904 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
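             /* E.g. with the write-back memory type (6) in bits 2:0 and a page-walk length of 4 (encoded as 3) in bits 5:3,
                the low byte of the EPTP becomes 0x1e, which is what the assertion below expects. */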
3905
3906 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
3907 AssertMsg( ((pVCpu->hm.s.vmx.HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
3908 && ((pVCpu->hm.s.vmx.HCPhysEPTP >> 6) & 0x3f) == 0, /* Bits 6:11 MBZ. */
3909 ("EPTP %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3910
3911 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
3912 AssertRCReturn(rc, rc);
3913 Log4(("Load[%RU32]: VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->idCpu, pVCpu->hm.s.vmx.HCPhysEPTP));
3914
3915 if ( pVM->hm.s.vmx.fUnrestrictedGuest
3916 || CPUMIsGuestPagingEnabledEx(pMixedCtx))
3917 {
3918 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
3919 if (CPUMIsGuestInPAEModeEx(pMixedCtx))
3920 {
3921 rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]); AssertRCReturn(rc, rc);
3922 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
3923 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
3924 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
3925 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
3926 }
3927
3928 /* The guest's view of its CR3 is unblemished with Nested Paging when the guest is using paging or we
3929 have Unrestricted Execution to handle the guest when it's not using paging. */
3930 GCPhysGuestCR3 = pMixedCtx->cr3;
3931 }
3932 else
3933 {
3934 /*
3935 * The guest is not using paging, but the CPU (VT-x) has to. While the guest thinks it accesses physical memory
3936 * directly, we use our identity-mapped page table to map guest-linear to guest-physical addresses.
3937 * EPT takes care of translating it to host-physical addresses.
3938 */
3939 RTGCPHYS GCPhys;
3940 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
3941 Assert(PDMVmmDevHeapIsEnabled(pVM));
3942
3943 /* We obtain it here every time as the guest could have relocated this PCI region. */
3944 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
3945 AssertRCReturn(rc, rc);
3946
3947 GCPhysGuestCR3 = GCPhys;
3948 }
3949
3950 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR3=%#RGv (GstN)\n", pVCpu->idCpu, GCPhysGuestCR3));
3951 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
3952 }
3953 else
3954 {
3955 /* Non-nested paging case, just use the hypervisor's CR3. */
3956 RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
3957
3958 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR3=%#RHv (HstN)\n", pVCpu->idCpu, HCPhysGuestCR3));
3959 rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3);
3960 }
3961 AssertRCReturn(rc, rc);
3962
3963 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3);
3964 }
3965
3966 /*
3967 * Guest CR4.
3968 */
3969 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4))
3970 {
3971 Assert(!(pMixedCtx->cr4 >> 32));
3972 uint32_t u32GuestCR4 = pMixedCtx->cr4;
3973
3974 /* The guest's view of its CR4 is unblemished. */
3975 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32GuestCR4);
3976 AssertRCReturn(rc, rc);
3977 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RX32\n", pVCpu->idCpu, u32GuestCR4));
3978
3979 /* Setup VT-x's view of the guest CR4. */
3980 /*
3981 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software interrupts to the 8086 program
3982 * interrupt handler. Clear the VME bit (the interrupt redirection bitmap is already all 0, see hmR3InitFinalizeR0())
3983 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
3984 */
3985 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3986 {
3987 Assert(pVM->hm.s.vmx.pRealModeTSS);
3988 Assert(PDMVmmDevHeapIsEnabled(pVM));
3989 u32GuestCR4 &= ~X86_CR4_VME;
3990 }
3991
3992 if (pVM->hm.s.fNestedPaging)
3993 {
3994 if ( !CPUMIsGuestPagingEnabledEx(pMixedCtx)
3995 && !pVM->hm.s.vmx.fUnrestrictedGuest)
3996 {
3997 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
3998 u32GuestCR4 |= X86_CR4_PSE;
3999 /* Our identity mapping is a 32-bit page directory. */
4000 u32GuestCR4 &= ~X86_CR4_PAE;
4001 }
4002 /* else use guest CR4.*/
4003 }
4004 else
4005 {
4006 /*
4007             * The shadow paging mode and the guest paging mode can differ; the shadow follows the host paging mode, so we
4008             * need to adjust VT-x's view of CR4 depending on our shadow page tables.
4009 */
4010 switch (pVCpu->hm.s.enmShadowMode)
4011 {
4012 case PGMMODE_REAL: /* Real-mode. */
4013 case PGMMODE_PROTECTED: /* Protected mode without paging. */
4014 case PGMMODE_32_BIT: /* 32-bit paging. */
4015 {
4016 u32GuestCR4 &= ~X86_CR4_PAE;
4017 break;
4018 }
4019
4020 case PGMMODE_PAE: /* PAE paging. */
4021 case PGMMODE_PAE_NX: /* PAE paging with NX. */
4022 {
4023 u32GuestCR4 |= X86_CR4_PAE;
4024 break;
4025 }
4026
4027 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
4028 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
4029#ifdef VBOX_ENABLE_64_BITS_GUESTS
4030 break;
4031#endif
4032 default:
4033 AssertFailed();
4034 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
4035 }
4036 }
4037
4038 /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
4039 uint64_t uSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
4040 uint64_t uZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
4041 u32GuestCR4 |= uSetCR4;
4042 u32GuestCR4 &= uZapCR4;
4043
4044 /* Write VT-x's view of the guest CR4 into the VMCS. */
4045 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR4=%#RX32 (Set=%#RX32 Zap=%#RX32)\n", pVCpu->idCpu, u32GuestCR4, uSetCR4, uZapCR4));
4046 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCR4);
4047 AssertRCReturn(rc, rc);
4048
4049        /* Setup the CR4 mask. These are the CR4 bits owned by the host; if the guest attempts to change them, a VM-exit is triggered. */
4050 uint32_t u32CR4Mask = 0;
4051 u32CR4Mask = X86_CR4_VME
4052 | X86_CR4_PAE
4053 | X86_CR4_PGE
4054 | X86_CR4_PSE
4055 | X86_CR4_VMXE;
4056 pVCpu->hm.s.vmx.u32CR4Mask = u32CR4Mask;
4057 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32CR4Mask);
4058 AssertRCReturn(rc, rc);
4059
4060 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR4);
4061 }
4062 return rc;
4063}
4064
4065
4066/**
4067 * Loads the guest debug registers into the guest-state area in the VMCS.
4068 * This also sets up whether #DB and MOV DRx accesses cause VM-exits.
4069 *
4070 * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3).
4071 *
4072 * @returns VBox status code.
4073 * @param pVCpu Pointer to the VMCPU.
4074 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4075 * out-of-sync. Make sure to update the required fields
4076 * before using them.
4077 *
4078 * @remarks No-long-jump zone!!!
4079 */
4080static int hmR0VmxLoadSharedDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4081{
4082 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
4083 return VINF_SUCCESS;
4084
4085#ifdef VBOX_STRICT
4086 /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
4087 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
4088 {
4089 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
4090 Assert((pMixedCtx->dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0); /* Bits 63:32, 15, 14, 12, 11 are reserved. */
4091 Assert((pMixedCtx->dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK); /* Bit 10 is reserved (RA1). */
4092 }
4093#endif
4094
4095 int rc;
4096 PVM pVM = pVCpu->CTX_SUFF(pVM);
4097 bool fInterceptDB = false;
4098 bool fInterceptMovDRx = false;
4099 if ( pVCpu->hm.s.fSingleInstruction
4100 || DBGFIsStepping(pVCpu))
4101 {
4102 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
4103 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)
4104 {
4105 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
4106 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
4107 AssertRCReturn(rc, rc);
4108 Assert(fInterceptDB == false);
4109 }
4110 else
4111 {
4112 pMixedCtx->eflags.u32 |= X86_EFL_TF;
4113 pVCpu->hm.s.fClearTrapFlag = true;
4114 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
4115 fInterceptDB = true;
4116 }
4117 }
4118
4119 if ( fInterceptDB
4120 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
4121 {
4122 /*
4123 * Use the combined guest and host DRx values found in the hypervisor
4124 * register set because the debugger has breakpoints active or someone
4125 * is single stepping on the host side without a monitor trap flag.
4126 *
4127 * Note! DBGF expects a clean DR6 state before executing guest code.
4128 */
4129#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4130 if ( CPUMIsGuestInLongModeEx(pMixedCtx)
4131 && !CPUMIsHyperDebugStateActivePending(pVCpu))
4132 {
4133 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
4134 Assert(CPUMIsHyperDebugStateActivePending(pVCpu));
4135 Assert(!CPUMIsGuestDebugStateActivePending(pVCpu));
4136 }
4137 else
4138#endif
4139 if (!CPUMIsHyperDebugStateActive(pVCpu))
4140 {
4141 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
4142 Assert(CPUMIsHyperDebugStateActive(pVCpu));
4143 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
4144 }
4145
4146 /* Update DR7. (The other DRx values are handled by CPUM one way or the other.) */
4147 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)CPUMGetHyperDR7(pVCpu));
4148 AssertRCReturn(rc, rc);
4149
4150 pVCpu->hm.s.fUsingHyperDR7 = true;
4151 fInterceptDB = true;
4152 fInterceptMovDRx = true;
4153 }
4154 else
4155 {
4156 /*
4157 * If the guest has enabled debug registers, we need to load them prior to
4158 * executing guest code so they'll trigger at the right time.
4159 */
4160 if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
4161 {
4162#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4163 if ( CPUMIsGuestInLongModeEx(pMixedCtx)
4164 && !CPUMIsGuestDebugStateActivePending(pVCpu))
4165 {
4166 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
4167 Assert(CPUMIsGuestDebugStateActivePending(pVCpu));
4168 Assert(!CPUMIsHyperDebugStateActivePending(pVCpu));
4169 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
4170 }
4171 else
4172#endif
4173 if (!CPUMIsGuestDebugStateActive(pVCpu))
4174 {
4175 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
4176 Assert(CPUMIsGuestDebugStateActive(pVCpu));
4177 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
4178 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
4179 }
4180 Assert(!fInterceptDB);
4181 Assert(!fInterceptMovDRx);
4182 }
4183 /*
4184 * If no debugging enabled, we'll lazy load DR0-3. Unlike on AMD-V, we
4185 * must intercept #DB in order to maintain a correct DR6 guest value.
4186 */
4187#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4188 else if ( !CPUMIsGuestDebugStateActivePending(pVCpu)
4189 && !CPUMIsGuestDebugStateActive(pVCpu))
4190#else
4191 else if (!CPUMIsGuestDebugStateActive(pVCpu))
4192#endif
4193 {
4194 fInterceptMovDRx = true;
4195 fInterceptDB = true;
4196 }
4197
4198 /* Update guest DR7. */
4199 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);
4200 AssertRCReturn(rc, rc);
4201
4202 pVCpu->hm.s.fUsingHyperDR7 = false;
4203 }
4204
4205 /*
4206 * Update the exception bitmap regarding intercepting #DB generated by the guest.
4207 */
4208 if (fInterceptDB)
4209 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_DB);
4210 else if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4211 {
4212#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
4213 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
4214#endif
4215 }
4216 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
4217 AssertRCReturn(rc, rc);
4218
4219 /*
4220 * Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions.
4221 */
4222 if (fInterceptMovDRx)
4223 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
4224 else
4225 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
4226 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
4227 AssertRCReturn(rc, rc);
4228
4229 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG);
4230 return VINF_SUCCESS;
4231}
4232
4233
4234#ifdef VBOX_STRICT
4235/**
4236 * Strict function to validate segment registers.
4237 *
4238 * @remarks ASSUMES CR0 is up to date.
4239 */
4240static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
4241{
4242 /* Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
4243 /* NOTE: The reason we check for attribute value 0 and not just the unusable bit here is because hmR0VmxWriteSegmentReg()
4244 * only updates the VMCS' copy of the value with the unusable bit and doesn't change the guest-context value. */
4245 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
4246 && ( !CPUMIsGuestInRealModeEx(pCtx)
4247 && !CPUMIsGuestInV86ModeEx(pCtx)))
4248 {
4249 /* Protected mode checks */
4250 /* CS */
4251 Assert(pCtx->cs.Attr.n.u1Present);
4252 Assert(!(pCtx->cs.Attr.u & 0xf00));
4253 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
4254 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
4255 || !(pCtx->cs.Attr.n.u1Granularity));
4256 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
4257 || (pCtx->cs.Attr.n.u1Granularity));
4258 /* CS cannot be loaded with NULL in protected mode. */
4259 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS?!? */
4260 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
4261 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
4262 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
4263 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
4264 else
4265            AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
4266 /* SS */
4267 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
4268 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
4269 if ( !(pCtx->cr0 & X86_CR0_PE)
4270 || pCtx->cs.Attr.n.u4Type == 3)
4271 {
4272 Assert(!pCtx->ss.Attr.n.u2Dpl);
4273 }
4274 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
4275 {
4276 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
4277 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
4278 Assert(pCtx->ss.Attr.n.u1Present);
4279 Assert(!(pCtx->ss.Attr.u & 0xf00));
4280 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
4281 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4282 || !(pCtx->ss.Attr.n.u1Granularity));
4283 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
4284 || (pCtx->ss.Attr.n.u1Granularity));
4285 }
4286 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
4287 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4288 {
4289 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4290 Assert(pCtx->ds.Attr.n.u1Present);
4291 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
4292 Assert(!(pCtx->ds.Attr.u & 0xf00));
4293 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
4294 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4295 || !(pCtx->ds.Attr.n.u1Granularity));
4296 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
4297 || (pCtx->ds.Attr.n.u1Granularity));
4298 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4299 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
4300 }
4301 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4302 {
4303 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4304 Assert(pCtx->es.Attr.n.u1Present);
4305 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
4306 Assert(!(pCtx->es.Attr.u & 0xf00));
4307 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
4308 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
4309 || !(pCtx->es.Attr.n.u1Granularity));
4310 Assert( !(pCtx->es.u32Limit & 0xfff00000)
4311 || (pCtx->es.Attr.n.u1Granularity));
4312 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4313 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
4314 }
4315 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
4316 {
4317 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4318 Assert(pCtx->fs.Attr.n.u1Present);
4319 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
4320 Assert(!(pCtx->fs.Attr.u & 0xf00));
4321 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
4322 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
4323 || !(pCtx->fs.Attr.n.u1Granularity));
4324 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
4325 || (pCtx->fs.Attr.n.u1Granularity));
4326 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4327 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
4328 }
4329 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
4330 {
4331 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4332 Assert(pCtx->gs.Attr.n.u1Present);
4333 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
4334 Assert(!(pCtx->gs.Attr.u & 0xf00));
4335 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
4336 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
4337 || !(pCtx->gs.Attr.n.u1Granularity));
4338 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
4339 || (pCtx->gs.Attr.n.u1Granularity));
4340 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4341 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
4342 }
4343 /* 64-bit capable CPUs. */
4344# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4345 if (HMVMX_IS_64BIT_HOST_MODE())
4346 {
4347 Assert(!(pCtx->cs.u64Base >> 32));
4348 Assert(!pCtx->ss.Attr.u || !(pCtx->ss.u64Base >> 32));
4349 Assert(!pCtx->ds.Attr.u || !(pCtx->ds.u64Base >> 32));
4350 Assert(!pCtx->es.Attr.u || !(pCtx->es.u64Base >> 32));
4351 }
4352# endif
4353 }
4354 else if ( CPUMIsGuestInV86ModeEx(pCtx)
4355 || ( CPUMIsGuestInRealModeEx(pCtx)
4356 && !pVM->hm.s.vmx.fUnrestrictedGuest))
4357 {
4358 /* Real and v86 mode checks. */
4359        /* hmR0VmxWriteSegmentReg() writes the modified attributes into the VMCS. We want to check what we're actually feeding to VT-x. */
4360 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
4361 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4362 {
4363 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3; u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
4364 }
4365 else
4366 {
4367 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
4368 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
4369 }
4370
4371 /* CS */
4372 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
4373 Assert(pCtx->cs.u32Limit == 0xffff);
4374 Assert(u32CSAttr == 0xf3);
4375 /* SS */
4376 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
4377 Assert(pCtx->ss.u32Limit == 0xffff);
4378 Assert(u32SSAttr == 0xf3);
4379 /* DS */
4380 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
4381 Assert(pCtx->ds.u32Limit == 0xffff);
4382 Assert(u32DSAttr == 0xf3);
4383 /* ES */
4384 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
4385 Assert(pCtx->es.u32Limit == 0xffff);
4386 Assert(u32ESAttr == 0xf3);
4387 /* FS */
4388 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
4389 Assert(pCtx->fs.u32Limit == 0xffff);
4390 Assert(u32FSAttr == 0xf3);
4391 /* GS */
4392 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
4393 Assert(pCtx->gs.u32Limit == 0xffff);
4394 Assert(u32GSAttr == 0xf3);
4395 /* 64-bit capable CPUs. */
4396# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4397 if (HMVMX_IS_64BIT_HOST_MODE())
4398 {
4399 Assert(!(pCtx->cs.u64Base >> 32));
4400 Assert(!u32SSAttr || !(pCtx->ss.u64Base >> 32));
4401 Assert(!u32DSAttr || !(pCtx->ds.u64Base >> 32));
4402 Assert(!u32ESAttr || !(pCtx->es.u64Base >> 32));
4403 }
4404# endif
4405 }
4406}
4407#endif /* VBOX_STRICT */
4408
4409
4410/**
4411 * Writes a guest segment register into the guest-state area in the VMCS.
4412 *
4413 * @returns VBox status code.
4414 * @param pVCpu Pointer to the VMCPU.
4415 * @param idxSel Index of the selector in the VMCS.
4416 * @param idxLimit Index of the segment limit in the VMCS.
4417 * @param idxBase Index of the segment base in the VMCS.
4418 * @param idxAccess Index of the access rights of the segment in the VMCS.
4419 * @param pSelReg Pointer to the segment selector.
4420 *
4421 * @remarks No-long-jump zone!!!
4422 */
4423static int hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
4424 uint32_t idxAccess, PCPUMSELREG pSelReg)
4425{
4426 int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); /* 16-bit guest selector field. */
4427 AssertRCReturn(rc, rc);
4428 rc = VMXWriteVmcs32(idxLimit, pSelReg->u32Limit); /* 32-bit guest segment limit field. */
4429 AssertRCReturn(rc, rc);
4430 rc = VMXWriteVmcsGstN(idxBase, pSelReg->u64Base); /* Natural width guest segment base field.*/
4431 AssertRCReturn(rc, rc);
4432
4433 uint32_t u32Access = pSelReg->Attr.u;
4434 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4435 {
4436 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
4437 u32Access = 0xf3;
4438 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
4439 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
4440 }
4441 else
4442 {
4443 /*
4444 * We use the segment attributes to tell a genuinely null selector apart from a selector that was merely loaded
4445 * with 0 in real-mode. A selector loaded with the value 0 in real-mode is valid and usable in protected-mode and
4446 * we should -not- mark it as an unusable segment. Both the recompiler and VT-x ensure that null selectors loaded
4447 * in protected-mode have their attributes set to 0.
4448 */
4449 if (!u32Access)
4450 u32Access = X86DESCATTR_UNUSABLE;
4451 }
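   /*
    * For reference: 0xf3 decodes as P=1, DPL=3, S=1 (code/data) and type=3 (read/write, accessed data segment),
    * which is what the real-on-v86 hack feeds VT-x for every segment register; X86DESCATTR_UNUSABLE is the
    * VT-x specific "unusable" marker used above for attribute-less null selectors.
    */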
4452
4453 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
4454 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
4455 ("Access bit not set for usable segment. idx=%#x sel=%#x attr %#x\n", idxBase, pSelReg, pSelReg->Attr.u));
4456
4457 rc = VMXWriteVmcs32(idxAccess, u32Access); /* 32-bit guest segment access-rights field. */
4458 AssertRCReturn(rc, rc);
4459 return rc;
4460}
4461
4462
4463/**
4464 * Loads the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)
4465 * into the guest-state area in the VMCS.
4466 *
4467 * @returns VBox status code.
4468 * @param pVCpu Pointer to the VMCPU.
4470 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4471 * out-of-sync. Make sure to update the required fields
4472 * before using them.
4473 *
4474 * @remarks ASSUMES pMixedCtx->cr0 is up to date (strict builds validation).
4475 * @remarks No-long-jump zone!!!
4476 */
4477static int hmR0VmxLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4478{
4479 int rc = VERR_INTERNAL_ERROR_5;
4480 PVM pVM = pVCpu->CTX_SUFF(pVM);
4481
4482 /*
4483 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
4484 */
4485 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS))
4486 {
4487 /* Save the segment attributes for real-on-v86 mode hack, so we can restore them on VM-exit. */
4488 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4489 {
4490 pVCpu->hm.s.vmx.RealMode.AttrCS.u = pMixedCtx->cs.Attr.u;
4491 pVCpu->hm.s.vmx.RealMode.AttrSS.u = pMixedCtx->ss.Attr.u;
4492 pVCpu->hm.s.vmx.RealMode.AttrDS.u = pMixedCtx->ds.Attr.u;
4493 pVCpu->hm.s.vmx.RealMode.AttrES.u = pMixedCtx->es.Attr.u;
4494 pVCpu->hm.s.vmx.RealMode.AttrFS.u = pMixedCtx->fs.Attr.u;
4495 pVCpu->hm.s.vmx.RealMode.AttrGS.u = pMixedCtx->gs.Attr.u;
4496 }
4497
4498#ifdef VBOX_WITH_REM
4499 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
4500 {
4501 Assert(pVM->hm.s.vmx.pRealModeTSS);
4502 AssertCompile(PGMMODE_REAL < PGMMODE_PROTECTED);
4503 if ( pVCpu->hm.s.vmx.fWasInRealMode
4504 && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED)
4505 {
4506 /* Signal that the recompiler must flush its code-cache as the guest -may- rewrite code it will later execute
4507 in real-mode (e.g. OpenBSD 4.0) */
4508 REMFlushTBs(pVM);
4509 Log4(("Load[%RU32]: Switch to protected mode detected!\n", pVCpu->idCpu));
4510 pVCpu->hm.s.vmx.fWasInRealMode = false;
4511 }
4512 }
4513#endif
4514 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
4515 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs);
4516 AssertRCReturn(rc, rc);
4517 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
4518 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss);
4519 AssertRCReturn(rc, rc);
4520 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
4521 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds);
4522 AssertRCReturn(rc, rc);
4523 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
4524 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es);
4525 AssertRCReturn(rc, rc);
4526 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
4527 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs);
4528 AssertRCReturn(rc, rc);
4529 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
4530 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs);
4531 AssertRCReturn(rc, rc);
4532
4533#ifdef VBOX_STRICT
4534 /* Validate. */
4535 hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx);
4536#endif
4537
4538 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);
4539 Log4(("Load[%RU32]: CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pVCpu->idCpu, pMixedCtx->cs.Sel,
4540 pMixedCtx->cs.u64Base, pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u));
4541 }
4542
4543 /*
4544 * Guest TR.
4545 */
4546 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR))
4547 {
4548 /*
4549 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is achieved
4550 * using the interrupt redirection bitmap (all bits cleared to let the guest handle INT-n's) in the TSS.
4551 * See hmR3InitFinalizeR0() for how pRealModeTSS is set up.
4552 */
4553 uint16_t u16Sel = 0;
4554 uint32_t u32Limit = 0;
4555 uint64_t u64Base = 0;
4556 uint32_t u32AccessRights = 0;
4557
4558 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4559 {
4560 u16Sel = pMixedCtx->tr.Sel;
4561 u32Limit = pMixedCtx->tr.u32Limit;
4562 u64Base = pMixedCtx->tr.u64Base;
4563 u32AccessRights = pMixedCtx->tr.Attr.u;
4564 }
4565 else
4566 {
4567 Assert(pVM->hm.s.vmx.pRealModeTSS);
4568 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMR3CanExecuteGuest() -XXX- what about inner loop changes? */
4569
4570 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
4571 RTGCPHYS GCPhys;
4572 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
4573 AssertRCReturn(rc, rc);
4574
4575 X86DESCATTR DescAttr;
4576 DescAttr.u = 0;
4577 DescAttr.n.u1Present = 1;
4578 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
4579
4580 u16Sel = 0;
4581 u32Limit = HM_VTX_TSS_SIZE;
4582 u64Base = GCPhys; /* in real-mode phys = virt. */
4583 u32AccessRights = DescAttr.u;
4584 }
4585
4586 /* Validate. */
4587 Assert(!(u16Sel & RT_BIT(2)));
4588 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
4589 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
4590 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
4591 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
4592 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
4593 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
4594 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
4595 Assert( (u32Limit & 0xfff) == 0xfff
4596 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
4597 Assert( !(pMixedCtx->tr.u32Limit & 0xfff00000)
4598 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
4599
4600 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_TR, u16Sel); AssertRCReturn(rc, rc);
4601 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRCReturn(rc, rc);
4602 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRCReturn(rc, rc);
4603 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRCReturn(rc, rc);
4604
4605 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_TR);
4606 Log4(("Load[%RU32]: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", pVCpu->idCpu, u64Base));
4607 }
4608
4609 /*
4610 * Guest GDTR.
4611 */
4612 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR))
4613 {
4614 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt); AssertRCReturn(rc, rc);
4615 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt); AssertRCReturn(rc, rc);
4616
4617 /* Validate. */
4618 Assert(!(pMixedCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
4619
4620 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_GDTR);
4621 Log4(("Load[%RU32]: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->gdtr.pGdt));
4622 }
4623
4624 /*
4625 * Guest LDTR.
4626 */
4627 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR))
4628 {
4629 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
4630 uint32_t u32Access = 0;
4631 if (!pMixedCtx->ldtr.Attr.u)
4632 u32Access = X86DESCATTR_UNUSABLE;
4633 else
4634 u32Access = pMixedCtx->ldtr.Attr.u;
4635
4636 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR, pMixedCtx->ldtr.Sel); AssertRCReturn(rc, rc);
4637 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pMixedCtx->ldtr.u32Limit); AssertRCReturn(rc, rc);
4638 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pMixedCtx->ldtr.u64Base); AssertRCReturn(rc, rc);
4639 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRCReturn(rc, rc);
4640
4641 /* Validate. */
4642 if (!(u32Access & X86DESCATTR_UNUSABLE))
4643 {
4644 Assert(!(pMixedCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
4645 Assert(pMixedCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
4646 Assert(!pMixedCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
4647 Assert(pMixedCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
4648 Assert(!pMixedCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
4649 Assert(!(pMixedCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
4650 Assert( (pMixedCtx->ldtr.u32Limit & 0xfff) == 0xfff
4651 || !pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
4652 Assert( !(pMixedCtx->ldtr.u32Limit & 0xfff00000)
4653 || pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
4654 }
4655
4656 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LDTR);
4657 Log4(("Load[%RU32]: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->ldtr.u64Base));
4658 }
4659
4660 /*
4661 * Guest IDTR.
4662 */
4663 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR))
4664 {
4665 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt); AssertRCReturn(rc, rc);
4666 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt); AssertRCReturn(rc, rc);
4667
4668 /* Validate. */
4669 Assert(!(pMixedCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
4670
4671 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_IDTR);
4672 Log4(("Load[%RU32]: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->idtr.pIdt));
4673 }
4674
4675 return VINF_SUCCESS;
4676}
4677
4678
4679/**
4680 * Loads certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
4681 * areas. The CPU automatically loads these MSRs on every successful VM-entry
4682 * and stores them back into the area on every successful VM-exit.
4683 *
4684 * This also creates/updates MSR slots for the host MSRs. The actual host
4685 * MSR values are -not- updated here for performance reasons. See
4686 * hmR0VmxSaveHostMsrs().
4687 *
4688 * Also loads the sysenter MSRs into the guest-state area in the VMCS.
4689 *
4690 * @returns VBox status code.
4691 * @param pVCpu Pointer to the VMCPU.
4692 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4693 * out-of-sync. Make sure to update the required fields
4694 * before using them.
4695 *
4696 * @remarks No-long-jump zone!!!
4697 */
4698static int hmR0VmxLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4699{
4700 AssertPtr(pVCpu);
4701 AssertPtr(pVCpu->hm.s.vmx.pvGuestMsr);
4702
4703 /*
4704 * MSRs for which we use the auto-load/store MSR area in the VMCS.
4705 */
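    /*
     * For reference: each entry in this area is a 16-byte VMXAUTOMSR record -- the MSR index in bits 31:0, a
     * reserved dword that must be zero, and the 64-bit MSR value in bits 127:64 -- which is the layout the CPU
     * expects for the VM-entry MSR-load and VM-exit MSR-store areas.
     */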
4706 PVM pVM = pVCpu->CTX_SUFF(pVM);
4707 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS))
4708 {
4709 /* For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs(). */
4710#if HC_ARCH_BITS == 32 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4711 if (pVM->hm.s.fAllow64BitGuests)
4712 {
4713 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR, pMixedCtx->msrLSTAR, false /* fUpdateHostMsr */);
4714 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR, pMixedCtx->msrSTAR, false /* fUpdateHostMsr */);
4715 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK, pMixedCtx->msrSFMASK, false /* fUpdateHostMsr */);
4716 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false /* fUpdateHostMsr */);
4717# ifdef DEBUG
4718 PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
4719 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)
4720 {
4721 Log4(("Load[%RU32]: MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", pVCpu->idCpu, i, pMsr->u32Msr,
4722 pMsr->u64Value));
4723 }
4724# endif
4725 }
4726#endif
4727 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
4728 }
4729
4730 /*
4731 * Guest Sysenter MSRs.
4732 * These flags are only set when MSR-bitmaps are not supported by the CPU, in which case we
4733 * cause VM-exits on WRMSR to these MSRs.
4734 */
4735 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR))
4736 {
4737 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs); AssertRCReturn(rc, rc);
4738 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);
4739 }
4740
4741 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR))
4742 {
4743 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip); AssertRCReturn(rc, rc);
4744 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
4745 }
4746
4747 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR))
4748 {
4749 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp); AssertRCReturn(rc, rc);
4750 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
4751 }
4752
4753 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_EFER_MSR))
4754 {
4755 if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
4756 {
4757 /*
4758 * If the CPU supports VMCS controls for swapping EFER, use it. Otherwise, we have no option
4759 * but to use the auto-load store MSR area in the VMCS for swapping EFER. See @bugref{7368}.
4760 */
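            /* Here "VMCS controls" presumably refers to the VM-entry "load IA32_EFER" and the VM-exit
               "save/load IA32_EFER" controls, which let the CPU swap EFER without an auto-load/store MSR slot. */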
4761 if (pVM->hm.s.vmx.fSupportsVmcsEfer)
4762 {
4763 int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pMixedCtx->msrEFER);
4764 AssertRCReturn(rc, rc);
4765 Log4(("Load[%RU32]: VMX_VMCS64_GUEST_EFER_FULL=%#RX64\n", pVCpu->idCpu, pMixedCtx->msrEFER));
4766 }
4767 else
4768 {
4769 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pMixedCtx->msrEFER, false /* fUpdateHostMsr */);
4770 /* We need to intercept reads too, see @bugref{7386} comment #16. */
4771 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
4772 Log4(("Load[%RU32]: MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", pVCpu->idCpu, MSR_K6_EFER,
4773 pMixedCtx->msrEFER, pVCpu->hm.s.vmx.cMsrs));
4774 }
4775 }
4776 else if (!pVM->hm.s.vmx.fSupportsVmcsEfer)
4777 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K6_EFER);
4778 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
4779 }
4780
4781 return VINF_SUCCESS;
4782}
4783
4784
4785/**
4786 * Loads the guest activity state into the guest-state area in the VMCS.
4787 *
4788 * @returns VBox status code.
4789 * @param pVCpu Pointer to the VMCPU.
4790 * @param pCtx Pointer to the guest-CPU context. The data may be
4791 * out-of-sync. Make sure to update the required fields
4792 * before using them.
4793 *
4794 * @remarks No-long-jump zone!!!
4795 */
4796static int hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pCtx)
4797{
4798 NOREF(pCtx);
4799 /** @todo See if we can make use of other states, e.g.
4800 * VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT. */
4801 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE))
4802 {
4803 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
4804 AssertRCReturn(rc, rc);
4805
4806 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE);
4807 }
4808 return VINF_SUCCESS;
4809}
4810
4811
4812/**
4813 * Sets up the appropriate function to run guest code.
4814 *
4815 * @returns VBox status code.
4816 * @param pVCpu Pointer to the VMCPU.
4817 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4818 * out-of-sync. Make sure to update the required fields
4819 * before using them.
4820 *
4821 * @remarks No-long-jump zone!!!
4822 */
4823static int hmR0VmxSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4824{
4825 if (CPUMIsGuestInLongModeEx(pMixedCtx))
4826 {
4827#ifndef VBOX_ENABLE_64_BITS_GUESTS
4828 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
4829#endif
4830 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
4831#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4832 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
4833 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64)
4834 {
4835 if (pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
4836 {
4837 /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
4838 AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT
4839 | HM_CHANGED_VMX_EXIT_CTLS
4840 | HM_CHANGED_VMX_ENTRY_CTLS
4841 | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
4842 }
4843 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
4844 }
4845#else
4846 /* 64-bit host or hybrid host. */
4847 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
4848#endif
4849 }
4850 else
4851 {
4852 /* Guest is not in long mode, use the 32-bit handler. */
4853#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4854 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0StartVM32)
4855 {
4856 if (pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
4857 {
4858 /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
4859 AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT
4860 | HM_CHANGED_VMX_EXIT_CTLS
4861 | HM_CHANGED_VMX_ENTRY_CTLS
4862 | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
4863 }
4864 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4865 }
4866#else
4867 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4868#endif
4869 }
4870 Assert(pVCpu->hm.s.vmx.pfnStartVM);
4871 return VINF_SUCCESS;
4872}
4873
4874
4875/**
4876 * Wrapper for running the guest code in VT-x.
4877 *
4878 * @returns VBox strict status code.
4879 * @param pVM Pointer to the VM.
4880 * @param pVCpu Pointer to the VMCPU.
4881 * @param pCtx Pointer to the guest-CPU context.
4882 *
4883 * @remarks No-long-jump zone!!!
4884 */
4885DECLINLINE(int) hmR0VmxRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
4886{
4887 /*
4888 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
4889 * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved, hence the need for this XMM wrapper.
4890 * See the MSDN docs, "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage", for details.
4891 */
4892 bool const fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
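    /* A VMCS in the "clear" launch state must be entered with VMLAUNCH, while a launched VMCS must use VMRESUME;
       fResumeVM tells the start-VM handler which of the two to issue. */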
4893 /** @todo Add stats for resume vs launch. */
4894#ifdef VBOX_WITH_KERNEL_USING_XMM
4895 return HMR0VMXStartVMWrapXMM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
4896#else
4897 return pVCpu->hm.s.vmx.pfnStartVM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
4898#endif
4899}
4900
4901
4902/**
4903 * Reports world-switch error and dumps some useful debug info.
4904 *
4905 * @param pVM Pointer to the VM.
4906 * @param pVCpu Pointer to the VMCPU.
4907 * @param rcVMRun The return code from VMLAUNCH/VMRESUME.
4908 * @param pCtx Pointer to the guest-CPU context.
4909 * @param pVmxTransient Pointer to the VMX transient structure (only
4910 * exitReason updated).
4911 */
4912static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
4913{
4914 Assert(pVM);
4915 Assert(pVCpu);
4916 Assert(pCtx);
4917 Assert(pVmxTransient);
4918 HMVMX_ASSERT_PREEMPT_SAFE();
4919
4920 Log4(("VM-entry failure: %Rrc\n", rcVMRun));
4921 switch (rcVMRun)
4922 {
4923 case VERR_VMX_INVALID_VMXON_PTR:
4924 AssertFailed();
4925 break;
4926 case VINF_SUCCESS: /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
4927 case VERR_VMX_UNABLE_TO_START_VM: /* VMLAUNCH/VMRESUME itself failed. */
4928 {
4929 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason);
4930 rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
4931 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
4932 AssertRC(rc);
4933
4934 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
4935 /* LastError.idCurrentCpu was already updated in hmR0VmxPreRunGuestCommitted().
4936 Cannot do it here as we may have been long preempted. */
4937
4938#ifdef VBOX_STRICT
4939 Log4(("uExitReason %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason,
4940 pVmxTransient->uExitReason));
4941 Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQualification));
4942 Log4(("InstrError %#RX32\n", pVCpu->hm.s.vmx.LastError.u32InstrError));
4943 if (pVCpu->hm.s.vmx.LastError.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
4944 Log4(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.LastError.u32InstrError]));
4945 else
4946 Log4(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
4947 Log4(("Entered host CPU %u\n", pVCpu->hm.s.vmx.LastError.idEnteredCpu));
4948 Log4(("Current host CPU %u\n", pVCpu->hm.s.vmx.LastError.idCurrentCpu));
4949
4950 /* VMX control bits. */
4951 uint32_t u32Val;
4952 uint64_t u64Val;
4953 HMVMXHCUINTREG uHCReg;
4954 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val); AssertRC(rc);
4955 Log4(("VMX_VMCS32_CTRL_PIN_EXEC %#RX32\n", u32Val));
4956 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val); AssertRC(rc);
4957 Log4(("VMX_VMCS32_CTRL_PROC_EXEC %#RX32\n", u32Val));
4958 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val); AssertRC(rc);
4959 Log4(("VMX_VMCS32_CTRL_PROC_EXEC2 %#RX32\n", u32Val));
4960 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val); AssertRC(rc);
4961 Log4(("VMX_VMCS32_CTRL_ENTRY %#RX32\n", u32Val));
4962 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val); AssertRC(rc);
4963 Log4(("VMX_VMCS32_CTRL_EXIT %#RX32\n", u32Val));
4964 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val); AssertRC(rc);
4965 Log4(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
4966 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32Val); AssertRC(rc);
4967 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
4968 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &u32Val); AssertRC(rc);
4969 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
4970 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &u32Val); AssertRC(rc);
4971 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
4972 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, &u32Val); AssertRC(rc);
4973 Log4(("VMX_VMCS32_CTRL_TPR_THRESHOLD %u\n", u32Val));
4974 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &u32Val); AssertRC(rc);
4975 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
4976 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
4977 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
4978 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
4979 Log4(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
4980 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val); AssertRC(rc);
4981 Log4(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
4982 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, &u32Val); AssertRC(rc);
4983 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
4984 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, &u32Val); AssertRC(rc);
4985 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
4986 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
4987 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
4988 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
4989 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
4990 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
4991 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
4992 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
4993 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
4994 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
4995 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
4996
4997 /* Guest bits. */
4998 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val); AssertRC(rc);
4999 Log4(("Old Guest Rip %#RX64 New %#RX64\n", pCtx->rip, u64Val));
5000 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val); AssertRC(rc);
5001 Log4(("Old Guest Rsp %#RX64 New %#RX64\n", pCtx->rsp, u64Val));
5002 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); AssertRC(rc);
5003 Log4(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
5004 rc = VMXReadVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, &u32Val); AssertRC(rc);
5005 Log4(("VMX_VMCS16_GUEST_FIELD_VPID %u\n", u32Val));
5006
5007 /* Host bits. */
5008 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR0, &uHCReg); AssertRC(rc);
5009 Log4(("Host CR0 %#RHr\n", uHCReg));
5010 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR3, &uHCReg); AssertRC(rc);
5011 Log4(("Host CR3 %#RHr\n", uHCReg));
5012 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR4, &uHCReg); AssertRC(rc);
5013 Log4(("Host CR4 %#RHr\n", uHCReg));
5014
5015 RTGDTR HostGdtr;
5016 PCX86DESCHC pDesc;
5017 ASMGetGDTR(&HostGdtr);
5018 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_CS, &u32Val); AssertRC(rc);
5019 Log4(("Host CS %#08x\n", u32Val));
5020 if (u32Val < HostGdtr.cbGdt)
5021 {
5022 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5023 HMR0DumpDescriptor(pDesc, u32Val, "CS: ");
5024 }
5025
5026 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_DS, &u32Val); AssertRC(rc);
5027 Log4(("Host DS %#08x\n", u32Val));
5028 if (u32Val < HostGdtr.cbGdt)
5029 {
5030 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5031 HMR0DumpDescriptor(pDesc, u32Val, "DS: ");
5032 }
5033
5034 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_ES, &u32Val); AssertRC(rc);
5035 Log4(("Host ES %#08x\n", u32Val));
5036 if (u32Val < HostGdtr.cbGdt)
5037 {
5038 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5039 HMR0DumpDescriptor(pDesc, u32Val, "ES: ");
5040 }
5041
5042 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_FS, &u32Val); AssertRC(rc);
5043 Log4(("Host FS %#08x\n", u32Val));
5044 if (u32Val < HostGdtr.cbGdt)
5045 {
5046 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5047 HMR0DumpDescriptor(pDesc, u32Val, "FS: ");
5048 }
5049
5050 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_GS, &u32Val); AssertRC(rc);
5051 Log4(("Host GS %#08x\n", u32Val));
5052 if (u32Val < HostGdtr.cbGdt)
5053 {
5054 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5055 HMR0DumpDescriptor(pDesc, u32Val, "GS: ");
5056 }
5057
5058 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_SS, &u32Val); AssertRC(rc);
5059 Log4(("Host SS %#08x\n", u32Val));
5060 if (u32Val < HostGdtr.cbGdt)
5061 {
5062 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5063 HMR0DumpDescriptor(pDesc, u32Val, "SS: ");
5064 }
5065
5066 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_TR, &u32Val); AssertRC(rc);
5067 Log4(("Host TR %#08x\n", u32Val));
5068 if (u32Val < HostGdtr.cbGdt)
5069 {
5070 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5071 HMR0DumpDescriptor(pDesc, u32Val, "TR: ");
5072 }
5073
5074 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_TR_BASE, &uHCReg); AssertRC(rc);
5075 Log4(("Host TR Base %#RHv\n", uHCReg));
5076 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, &uHCReg); AssertRC(rc);
5077 Log4(("Host GDTR Base %#RHv\n", uHCReg));
5078 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, &uHCReg); AssertRC(rc);
5079 Log4(("Host IDTR Base %#RHv\n", uHCReg));
5080 rc = VMXReadVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, &u32Val); AssertRC(rc);
5081 Log4(("Host SYSENTER CS %#08x\n", u32Val));
5082 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, &uHCReg); AssertRC(rc);
5083 Log4(("Host SYSENTER EIP %#RHv\n", uHCReg));
5084 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, &uHCReg); AssertRC(rc);
5085 Log4(("Host SYSENTER ESP %#RHv\n", uHCReg));
5086 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RSP, &uHCReg); AssertRC(rc);
5087 Log4(("Host RSP %#RHv\n", uHCReg));
5088 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg); AssertRC(rc);
5089 Log4(("Host RIP %#RHv\n", uHCReg));
5090# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
5091 if (HMVMX_IS_64BIT_HOST_MODE())
5092 {
5093 Log4(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
5094 Log4(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
5095 Log4(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
5096 Log4(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
5097 Log4(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
5098 Log4(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
5099 }
5100# endif
5101#endif /* VBOX_STRICT */
5102 break;
5103 }
5104
5105 default:
5106 /* Impossible */
5107 AssertMsgFailed(("hmR0VmxReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
5108 break;
5109 }
5110 NOREF(pVM); NOREF(pCtx);
5111}
5112
5113
5114#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
5115#ifndef VMX_USE_CACHED_VMCS_ACCESSES
5116# error "VMX_USE_CACHED_VMCS_ACCESSES not defined when it should be!"
5117#endif
5118#ifdef VBOX_STRICT
5119static bool hmR0VmxIsValidWriteField(uint32_t idxField)
5120{
5121 switch (idxField)
5122 {
5123 case VMX_VMCS_GUEST_RIP:
5124 case VMX_VMCS_GUEST_RSP:
5125 case VMX_VMCS_GUEST_SYSENTER_EIP:
5126 case VMX_VMCS_GUEST_SYSENTER_ESP:
5127 case VMX_VMCS_GUEST_GDTR_BASE:
5128 case VMX_VMCS_GUEST_IDTR_BASE:
5129 case VMX_VMCS_GUEST_CS_BASE:
5130 case VMX_VMCS_GUEST_DS_BASE:
5131 case VMX_VMCS_GUEST_ES_BASE:
5132 case VMX_VMCS_GUEST_FS_BASE:
5133 case VMX_VMCS_GUEST_GS_BASE:
5134 case VMX_VMCS_GUEST_SS_BASE:
5135 case VMX_VMCS_GUEST_LDTR_BASE:
5136 case VMX_VMCS_GUEST_TR_BASE:
5137 case VMX_VMCS_GUEST_CR3:
5138 return true;
5139 }
5140 return false;
5141}
5142
5143static bool hmR0VmxIsValidReadField(uint32_t idxField)
5144{
5145 switch (idxField)
5146 {
5147 /* Read-only fields. */
5148 case VMX_VMCS_RO_EXIT_QUALIFICATION:
5149 return true;
5150 }
5151 /* Remaining readable fields should also be writable. */
5152 return hmR0VmxIsValidWriteField(idxField);
5153}
5154#endif /* VBOX_STRICT */
5155
5156
5157/**
5158 * Executes the specified handler in 64-bit mode.
5159 *
5160 * @returns VBox status code.
5161 * @param pVM Pointer to the VM.
5162 * @param pVCpu Pointer to the VMCPU.
5163 * @param pCtx Pointer to the guest CPU context.
5164 * @param enmOp The operation to perform.
5165 * @param cbParam Number of parameters (a count, despite the 'cb' prefix).
5166 * @param paParam Array of 32-bit parameters.
5167 */
5168VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
5169 uint32_t *paParam)
5170{
5171 int rc, rc2;
5172 PHMGLOBALCPUINFO pCpu;
5173 RTHCPHYS HCPhysCpuPage;
5174 RTCCUINTREG uOldEflags;
5175
5176 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
5177 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
5178 Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
5179 Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
5180
5181#ifdef VBOX_STRICT
5182 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries; i++)
5183 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i]));
5184
5185 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries; i++)
5186 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i]));
5187#endif
5188
5189 /* Disable interrupts. */
5190 uOldEflags = ASMIntDisableFlags();
5191
5192#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
5193 RTCPUID idHostCpu = RTMpCpuId();
5194 CPUMR0SetLApic(pVCpu, idHostCpu);
5195#endif
5196
5197 pCpu = HMR0GetCurrentCpu();
5198 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
5199
5200 /* Clear VMCS. Marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */
5201 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
5202
5203 /* Leave VMX Root Mode. */
5204 VMXDisable();
5205
5206 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
5207
5208 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
5209 CPUMSetHyperEIP(pVCpu, enmOp);
5210 for (int i = (int)cbParam - 1; i >= 0; i--)
5211 CPUMPushHyper(pVCpu, paParam[i]);
5212
5213 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
5214
5215 /* Call the switcher. */
5216 rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
5217 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
5218
5219 /** @todo replace with hmR0VmxEnterRootMode() and hmR0VmxLeaveRootMode(). */
5220 /* Make sure the VMX instructions don't cause #UD faults. */
5221 ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);
5222
5223 /* Re-enter VMX Root Mode */
5224 rc2 = VMXEnable(HCPhysCpuPage);
5225 if (RT_FAILURE(rc2))
5226 {
5227 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
5228 ASMSetFlags(uOldEflags);
5229 return rc2;
5230 }
5231
5232 rc2 = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
5233 AssertRC(rc2);
5234 Assert(!(ASMGetFlags() & X86_EFL_IF));
5235 ASMSetFlags(uOldEflags);
5236 return rc;
5237}
5238
5239
5240/**
5241 * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts
5242 * supporting 64-bit guests.
5243 *
5244 * @returns VBox status code.
5245 * @param fResume Whether to VMLAUNCH or VMRESUME.
5246 * @param pCtx Pointer to the guest-CPU context.
5247 * @param pCache Pointer to the VMCS cache.
5248 * @param pVM Pointer to the VM.
5249 * @param pVCpu Pointer to the VMCPU.
5250 */
5251DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
5252{
5253 uint32_t aParam[6];
5254 PHMGLOBALCPUINFO pCpu = NULL;
5255 RTHCPHYS HCPhysCpuPage = 0;
5256 int rc = VERR_INTERNAL_ERROR_5;
5257
5258 pCpu = HMR0GetCurrentCpu();
5259 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
5260
5261#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5262 pCache->uPos = 1;
5263 pCache->interPD = PGMGetInterPaeCR3(pVM);
5264 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
5265#endif
5266
5267#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
5268 pCache->TestIn.HCPhysCpuPage = 0;
5269 pCache->TestIn.HCPhysVmcs = 0;
5270 pCache->TestIn.pCache = 0;
5271 pCache->TestOut.HCPhysVmcs = 0;
5272 pCache->TestOut.pCache = 0;
5273 pCache->TestOut.pCtx = 0;
5274 pCache->TestOut.eflags = 0;
5275#endif
5276
5277 aParam[0] = (uint32_t)(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */
5278 aParam[1] = (uint32_t)(HCPhysCpuPage >> 32); /* Param 1: VMXON physical address - Hi. */
5279 aParam[2] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */
5280 aParam[3] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs >> 32); /* Param 2: VMCS physical address - Hi. */
5281 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache);
5282 aParam[5] = 0;
5283
5284#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5285 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
5286 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
5287#endif
5288 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, 6, &aParam[0]);
5289
5290#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5291 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
5292 Assert(pCtx->dr[4] == 10);
5293 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
5294#endif
5295
5296#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
5297 AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
5298 AssertMsg(pCache->TestIn.HCPhysVmcs == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
5299 pVCpu->hm.s.vmx.HCPhysVmcs));
5300 AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
5301 pCache->TestOut.HCPhysVmcs));
5302 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
5303 pCache->TestOut.pCache));
5304 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache),
5305 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache)));
5306 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
5307 pCache->TestOut.pCtx));
5308 Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
5309#endif
5310 return rc;
5311}
5312
5313
5314/**
5315 * Initialize the VMCS-Read cache. The VMCS cache is used for 32-bit hosts
5316 * running 64-bit guests (except 32-bit Darwin which runs with 64-bit paging in
5317 * 32-bit mode) for 64-bit fields that cannot be accessed in 32-bit mode. Some
5318 * 64-bit fields -can- be accessed (those that have a 32-bit FULL & HIGH part).
5319 *
5320 * @returns VBox status code.
5321 * @param pVM Pointer to the VM.
5322 * @param pVCpu Pointer to the VMCPU.
5323 */
5324static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu)
5325{
5326#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField) \
5327{ \
5328 Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
5329 pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
5330 pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
5331 ++cReadFields; \
5332}
5333
5334 AssertPtr(pVM);
5335 AssertPtr(pVCpu);
5336 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
5337 uint32_t cReadFields = 0;
5338
5339 /*
5340 * Don't remove the #if 0'd fields in this code. They're listed here for consistency
5341 * and serve to indicate exceptions to the rules.
5342 */
5343
5344 /* Guest-natural selector base fields. */
5345#if 0
5346 /* These are 32-bit in practice. See Intel spec. 2.5 "Control Registers". */
5347 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR0);
5348 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR4);
5349#endif
5350 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_ES_BASE);
5351 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CS_BASE);
5352 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SS_BASE);
5353 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DS_BASE);
5354 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_FS_BASE);
5355 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GS_BASE);
5356 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_LDTR_BASE);
5357 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_TR_BASE);
5358 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GDTR_BASE);
5359 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_IDTR_BASE);
5360 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RSP);
5361 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RIP);
5362#if 0
5363 /* Unused natural width guest-state fields. */
5364 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS);
5365 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in Nested Paging case */
5366#endif
5367 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_ESP);
5368 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP);
5369
5370 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for these 64-bit fields (using "FULL" and "HIGH" fields). */
5371#if 0
5372 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL);
5373 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_DEBUGCTL_FULL);
5374 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PAT_FULL);
5375 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_EFER_FULL);
5376 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL);
5377 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE0_FULL);
5378 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE1_FULL);
5379 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE2_FULL);
5380 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE3_FULL);
5381#endif
5382
5383 /* Natural width guest-state fields. */
5384 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_QUALIFICATION);
5385#if 0
5386 /* Currently unused field. */
5387 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR);
5388#endif
5389
5390 if (pVM->hm.s.fNestedPaging)
5391 {
5392 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3);
5393 AssertMsg(cReadFields == VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields,
5394 VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX));
5395 pCache->Read.cValidEntries = VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX;
5396 }
5397 else
5398 {
5399 AssertMsg(cReadFields == VMX_VMCS_MAX_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields, VMX_VMCS_MAX_CACHE_IDX));
5400 pCache->Read.cValidEntries = VMX_VMCS_MAX_CACHE_IDX;
5401 }
5402
5403#undef VMXLOCAL_INIT_READ_CACHE_FIELD
5404 return VINF_SUCCESS;
5405}
5406
5407
5408/**
5409 * Writes a field into the VMCS. This can either directly invoke a VMWRITE or
5410 * queue up the VMWRITE by using the VMCS write cache (on 32-bit hosts, except
5411 * darwin, running 64-bit guests).
5412 *
5413 * @returns VBox status code.
5414 * @param pVCpu Pointer to the VMCPU.
5415 * @param idxField The VMCS field encoding.
5416 * @param u64Val 16, 32 or 64-bit value.
5417 */
5418VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5419{
5420 int rc;
5421 switch (idxField)
5422 {
5423 /*
5424 * These fields consist of a "FULL" and a "HIGH" part which can be written to individually.
5425 */
5426 /* 64-bit Control fields. */
5427 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
5428 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
5429 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
5430 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
5431 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
5432 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
5433 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
5434 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
5435 case VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL:
5436 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
5437 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
5438 case VMX_VMCS64_CTRL_EPTP_FULL:
5439 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
5440 /* 64-bit Guest-state fields. */
5441 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
5442 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
5443 case VMX_VMCS64_GUEST_PAT_FULL:
5444 case VMX_VMCS64_GUEST_EFER_FULL:
5445 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
5446 case VMX_VMCS64_GUEST_PDPTE0_FULL:
5447 case VMX_VMCS64_GUEST_PDPTE1_FULL:
5448 case VMX_VMCS64_GUEST_PDPTE2_FULL:
5449 case VMX_VMCS64_GUEST_PDPTE3_FULL:
5450 /* 64-bit Host-state fields. */
5451 case VMX_VMCS64_HOST_FIELD_PAT_FULL:
5452 case VMX_VMCS64_HOST_FIELD_EFER_FULL:
5453 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
5454 {
5455 rc = VMXWriteVmcs32(idxField, u64Val);
5456 rc |= VMXWriteVmcs32(idxField + 1, (uint32_t)(u64Val >> 32));
5457 break;
5458 }
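        /*
         * For illustration: the "HIGH" access encoding of these 64-bit fields is the "FULL" encoding + 1, so a
         * write of 0x1122334455667788 stores 0x55667788 via idxField and 0x11223344 via idxField + 1.
         */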
5459
5460 /*
5461 * These fields do not have high and low parts. Queue up the VMWRITE by using the VMCS write-cache (for 64-bit
5462 * values). The queued VMWRITEs are executed when we switch the host to 64-bit mode for running 64-bit guests.
5463 */
5464 /* Natural-width Guest-state fields. */
5465 case VMX_VMCS_GUEST_CR3:
5466 case VMX_VMCS_GUEST_ES_BASE:
5467 case VMX_VMCS_GUEST_CS_BASE:
5468 case VMX_VMCS_GUEST_SS_BASE:
5469 case VMX_VMCS_GUEST_DS_BASE:
5470 case VMX_VMCS_GUEST_FS_BASE:
5471 case VMX_VMCS_GUEST_GS_BASE:
5472 case VMX_VMCS_GUEST_LDTR_BASE:
5473 case VMX_VMCS_GUEST_TR_BASE:
5474 case VMX_VMCS_GUEST_GDTR_BASE:
5475 case VMX_VMCS_GUEST_IDTR_BASE:
5476 case VMX_VMCS_GUEST_RSP:
5477 case VMX_VMCS_GUEST_RIP:
5478 case VMX_VMCS_GUEST_SYSENTER_ESP:
5479 case VMX_VMCS_GUEST_SYSENTER_EIP:
5480 {
5481 if (!(u64Val >> 32))
5482 {
5483 /* If this field is 64-bit, VT-x will zero out the top bits. */
5484 rc = VMXWriteVmcs32(idxField, (uint32_t)u64Val);
5485 }
5486 else
5487 {
5488 /* Assert that only the 32->64 switcher case should ever come here. */
5489 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);
5490 rc = VMXWriteCachedVmcsEx(pVCpu, idxField, u64Val);
5491 }
5492 break;
5493 }
5494
5495 default:
5496 {
5497 AssertMsgFailed(("VMXWriteVmcs64Ex: Invalid field %#RX32 (pVCpu=%p u64Val=%#RX64)\n", idxField, pVCpu, u64Val));
5498 rc = VERR_INVALID_PARAMETER;
5499 break;
5500 }
5501 }
5502 AssertRCReturn(rc, rc);
5503 return rc;
5504}
5505
5506
5507/**
5508 * Queue up a VMWRITE by using the VMCS write cache. This is only used on 32-bit
5509 * hosts (except darwin) for 64-bit guests.
5510 *
5511 * @param pVCpu Pointer to the VMCPU.
5512 * @param idxField The VMCS field encoding.
5513 * @param u64Val 16, 32 or 64-bit value.
5514 */
5515VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5516{
5517 AssertPtr(pVCpu);
5518 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
5519
5520 AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
5521 ("entries=%u\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
5522
5523 /* Make sure there are no duplicates. */
5524 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
5525 {
5526 if (pCache->Write.aField[i] == idxField)
5527 {
5528 pCache->Write.aFieldVal[i] = u64Val;
5529 return VINF_SUCCESS;
5530 }
5531 }
5532
5533 pCache->Write.aField[pCache->Write.cValidEntries] = idxField;
5534 pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
5535 pCache->Write.cValidEntries++;
5536 return VINF_SUCCESS;
5537}
5538
5539/* Enable later when the assembly code uses these as callbacks. */
5540#if 0
5541/*
5542 * Loads the VMCS write-cache into the CPU (by executing VMWRITEs).
5543 *
5544 * @param pVCpu Pointer to the VMCPU.
5545 * @param pCache Pointer to the VMCS cache.
5546 *
5547 * @remarks No-long-jump zone!!!
5548 */
5549VMMR0DECL(void) VMXWriteCachedVmcsLoad(PVMCPU pVCpu, PVMCSCACHE pCache)
5550{
5551 AssertPtr(pCache);
5552 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
5553 {
5554 int rc = VMXWriteVmcs64(pCache->Write.aField[i], pCache->Write.aFieldVal[i]);
5555 AssertRC(rc);
5556 }
5557 pCache->Write.cValidEntries = 0;
5558}
5559
5560
5561/**
5562 * Stores the VMCS read-cache from the CPU (by executing VMREADs).
5563 *
5564 * @param pVCpu Pointer to the VMCPU.
5565 * @param pCache Pointer to the VMCS cache.
5566 *
5567 * @remarks No-long-jump zone!!!
5568 */
5569VMMR0DECL(void) VMXReadCachedVmcsStore(PVMCPU pVCpu, PVMCSCACHE pCache)
5570{
5571 AssertPtr(pCache);
5572 for (uint32_t i = 0; i < pCache->Read.cValidEntries; i++)
5573 {
5574 int rc = VMXReadVmcs64(pCache->Read.aField[i], &pCache->Read.aFieldVal[i]);
5575 AssertRC(rc);
5576 }
5577}
5578#endif
5579#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
5580
5581
5582/**
5583 * Sets up the usage of TSC-offsetting and updates the VMCS. If offsetting is
5584 * not possible, causes VM-exits on RDTSC(P)s. Also sets up the VMX preemption
5585 * timer.
5586 *
5588 * @param pVCpu Pointer to the VMCPU.
5589 *
5590 * @remarks No-long-jump zone!!!
5591 */
5592static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu)
5593{
5594 int rc = VERR_INTERNAL_ERROR_5;
5595 bool fOffsettedTsc = false;
5596 bool fParavirtTsc = false;
5597 PVM pVM = pVCpu->CTX_SUFF(pVM);
5598 if (pVM->hm.s.vmx.fUsePreemptTimer)
5599 {
5600 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &fParavirtTsc,
5601 &pVCpu->hm.s.vmx.u64TSCOffset);
5602
5603 /* Make sure the returned values have sane upper and lower boundaries. */
5604 uint64_t u64CpuHz = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
5605 cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64th of a second */
5606 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
5607 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
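        /*
         * Rough numbers: with a 2 GHz TSC the clamping keeps the deadline between ~976k ticks (~0.5 ms) and
         * ~31.25M ticks (~15.6 ms); the shift then converts TSC ticks into preemption-timer ticks, since the
         * timer counts down at the TSC rate divided by 2^cPreemptTimerShift.
         */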
5608
5609 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
5610 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount); AssertRC(rc);
5611 }
5612 else
5613 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset, &fParavirtTsc);
5614
5615#if 1
5616 if (fParavirtTsc)
5617 {
5618#if 1
5619 uint64_t const u64CurTsc = ASMReadTSC();
5620 uint64_t const u64LastTick = TMCpuTickGetLastSeen(pVCpu);
5621 if (u64CurTsc + pVCpu->hm.s.vmx.u64TSCOffset < u64LastTick)
5622 {
5623 pVCpu->hm.s.vmx.u64TSCOffset = (u64LastTick - u64CurTsc);
5624 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffsetAdjusted);
5625 }
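        /* This keeps (TSC + offset) from dropping below the last tick the guest has already observed; that matters
           here because the same offset is handed to the paravirt. provider below (GIMR0UpdateParavirtTsc) and the
           virtual TSC must not appear to jump backwards. */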
5626
5627 Assert(u64CurTsc + pVCpu->hm.s.vmx.u64TSCOffset >= u64LastTick);
5628#endif
5629 rc = GIMR0UpdateParavirtTsc(pVM, pVCpu->hm.s.vmx.u64TSCOffset);
5630 AssertRC(rc);
5631 /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
5632 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0); AssertRC(rc);
5633
5634 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5635 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
5636 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
5637 }
5638 else
5639#else
5640 if (fParavirtTsc)
5641 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
5642#endif
5643 if (fOffsettedTsc)
5644 {
5645 uint64_t u64CurTSC = ASMReadTSC();
5646 if (u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
5647 {
5648 /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
5649 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset); AssertRC(rc);
5650
5651 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5652 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
5653 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
5654 }
5655 else
5656 {
5657 /* VM-exit on RDTSC(P) as we would otherwise pass decreasing TSC values to the guest. */
5658 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5659 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
5660 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow);
5661 }
5662 }
5663 else
5664 {
5665 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
5666 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5667 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
5668 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
5669 }
5670}
5671
5672
5673/**
5674 * Determines if an exception is a contributory exception. Contributory
5675 * exceptions are ones which can cause double-faults. Page-fault is
5676 * intentionally not included here as it's a conditional contributory exception.
5677 *
5678 * @returns true if the exception is contributory, false otherwise.
5679 * @param uVector The exception vector.
5680 */
5681DECLINLINE(bool) hmR0VmxIsContributoryXcpt(const uint32_t uVector)
5682{
5683 switch (uVector)
5684 {
5685 case X86_XCPT_GP:
5686 case X86_XCPT_SS:
5687 case X86_XCPT_NP:
5688 case X86_XCPT_TS:
5689 case X86_XCPT_DE:
5690 return true;
5691 default:
5692 break;
5693 }
5694 return false;
5695}
5696
5697
5698/**
5699 * Sets an event as a pending event to be injected into the guest.
5700 *
5701 * @param pVCpu Pointer to the VMCPU.
5702 * @param u32IntInfo The VM-entry interruption-information field.
5703 * @param cbInstr The VM-entry instruction length in bytes (for software
5704 * interrupts, exceptions and privileged software
5705 * exceptions).
5706 * @param u32ErrCode The VM-entry exception error code.
5707 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
5708 * page-fault.
5709 *
5710 * @remarks Statistics counter assumes this is a guest event being injected or
5711 * re-injected into the guest, i.e. 'StatInjectPendingReflect' is
5712 * always incremented.
5713 */
5714DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
5715 RTGCUINTPTR GCPtrFaultAddress)
5716{
5717 Assert(!pVCpu->hm.s.Event.fPending);
5718 pVCpu->hm.s.Event.fPending = true;
5719 pVCpu->hm.s.Event.u64IntInfo = u32IntInfo;
5720 pVCpu->hm.s.Event.u32ErrCode = u32ErrCode;
5721 pVCpu->hm.s.Event.cbInstr = cbInstr;
5722 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
5723
5724 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
5725}
5726
5727
5728/**
5729 * Sets a double-fault (#DF) exception as pending-for-injection into the VM.
5730 *
5731 * @param pVCpu Pointer to the VMCPU.
5732 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5733 * out-of-sync. Make sure to update the required fields
5734 * before using them.
5735 */
5736DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5737{
5738 NOREF(pMixedCtx);
5739 uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
5740 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5741 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
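    /* For reference: the interruption-information field packs the vector into bits 7:0, the event type into
       bits 10:8, the "deliver error code" flag into bit 11 and the valid flag into bit 31; the #DF error code
       itself is always zero. */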
5742 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
5743}
5744
5745
5746/**
5747 * Handles a condition that occurred while delivering an event through the guest
5748 * IDT.
5749 *
5750 * @returns VBox status code (informational error codes included).
5751 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5752 * @retval VINF_HM_DOUBLE_FAULT if a #DF condition was detected and we ought to
5753 * continue execution of the guest which will deliver the #DF.
5754 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5755 *
5756 * @param pVCpu Pointer to the VMCPU.
5757 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5758 * out-of-sync. Make sure to update the required fields
5759 * before using them.
5760 * @param pVmxTransient Pointer to the VMX transient structure.
5761 *
5762 * @remarks No-long-jump zone!!!
5763 */
5764static int hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
5765{
5766 uint32_t uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
5767
5768 int rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
5769 AssertRCReturn(rc, rc);
5770 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
5771 {
5772 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
5773 AssertRCReturn(rc, rc);
5774
5775 uint32_t uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
5776 uint32_t uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
5777
5778 typedef enum
5779 {
5780 VMXREFLECTXCPT_XCPT, /* Reflect the exception to the guest or for further evaluation by VMM. */
5781 VMXREFLECTXCPT_DF, /* Reflect the exception as a double-fault to the guest. */
5782 VMXREFLECTXCPT_TF, /* Indicate a triple faulted state to the VMM. */
5783 VMXREFLECTXCPT_NONE /* Nothing to reflect. */
5784 } VMXREFLECTXCPT;
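        /*
         * For example: a #PF hit while delivering a #PF is flagged as a vectoring #PF and handled by the #PF
         * VM-exit code; a contributory exception (e.g. #GP) hit while delivering another contributory exception
         * or a #PF is generally promoted to a #DF; and anything hit while delivering a #DF escalates to a
         * triple fault (VINF_EM_RESET).
         */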
5785
5786 /* See Intel spec. 30.7.1.1 "Reflecting Exceptions to Guest Software". */
5787 VMXREFLECTXCPT enmReflect = VMXREFLECTXCPT_NONE;
5788 if (VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo))
5789 {
5790 if (uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT)
5791 {
5792 enmReflect = VMXREFLECTXCPT_XCPT;
5793#ifdef VBOX_STRICT
5794 if ( hmR0VmxIsContributoryXcpt(uIdtVector)
5795 && uExitVector == X86_XCPT_PF)
5796 {
5797 Log4(("IDT: vcpu[%RU32] Contributory #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
5798 }
5799#endif
5800 if ( uExitVector == X86_XCPT_PF
5801 && uIdtVector == X86_XCPT_PF)
5802 {
5803 pVmxTransient->fVectoringPF = true;
5804 Log4(("IDT: vcpu[%RU32] Vectoring #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
5805 }
5806 else if ( (pVCpu->hm.s.vmx.u32XcptBitmap & HMVMX_CONTRIBUTORY_XCPT_MASK)
5807 && hmR0VmxIsContributoryXcpt(uExitVector)
5808 && ( hmR0VmxIsContributoryXcpt(uIdtVector)
5809 || uIdtVector == X86_XCPT_PF))
5810 {
5811 enmReflect = VMXREFLECTXCPT_DF;
5812 }
5813 else if (uIdtVector == X86_XCPT_DF)
5814 enmReflect = VMXREFLECTXCPT_TF;
5815 }
5816 else if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5817 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT
5818 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI)
5819 {
5820 /*
5821 * Ignore software interrupts (INT n), software exceptions (#BP, #OF) and
5822 * privileged software exceptions (#DB from ICEBP) as they reoccur when restarting the instruction.
5823 */
5824 enmReflect = VMXREFLECTXCPT_XCPT;
5825 }
5826 }
5827 else if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5828 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT
5829 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI)
5830 {
5831 /*
5832 * If event delivery caused an EPT violation/misconfig or APIC access VM-exit, then the VM-exit
5833 * interruption-information will not be valid as it's not an exception and we end up here. In such cases,
5834 * it is sufficient to reflect the original exception to the guest after handling the VM-exit.
5835 */
5836 enmReflect = VMXREFLECTXCPT_XCPT;
5837 }
5838
5839 /*
5840 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig etc.) occurred
5841 * while delivering the NMI, we need to clear the block-by-NMI field in the guest interruptibility-state before
5842 * re-delivering the NMI after handling the VM-exit. Otherwise the subsequent VM-entry would fail.
5843 *
5844 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception". See @bugref{7445}.
5845 */
5846 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5847 && enmReflect == VMXREFLECTXCPT_XCPT
5848 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
5849 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
5850 {
5851 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
5852 }
5853
5854 switch (enmReflect)
5855 {
5856 case VMXREFLECTXCPT_XCPT:
5857 {
5858 Assert( uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5859 && uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5860 && uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT);
5861
5862 uint32_t u32ErrCode = 0;
5863 if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo))
5864 {
5865 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
5866 AssertRCReturn(rc, rc);
5867 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5868 }
5869
5870 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF. See hmR0VmxExitXcptPF(). */
5871 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
5872 0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2);
5873 rc = VINF_SUCCESS;
5874 Log4(("IDT: vcpu[%RU32] Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->idCpu,
5875 pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.u32ErrCode));
5876
5877 break;
5878 }
5879
5880 case VMXREFLECTXCPT_DF:
5881 {
5882 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
5883 rc = VINF_HM_DOUBLE_FAULT;
5884 Log4(("IDT: vcpu[%RU32] Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->idCpu,
5885 pVCpu->hm.s.Event.u64IntInfo, uIdtVector, uExitVector));
5886
5887 break;
5888 }
5889
5890 case VMXREFLECTXCPT_TF:
5891 {
5892 rc = VINF_EM_RESET;
5893 Log4(("IDT: vcpu[%RU32] Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", pVCpu->idCpu, uIdtVector,
5894 uExitVector));
5895 break;
5896 }
5897
5898 default:
5899 Assert(rc == VINF_SUCCESS);
5900 break;
5901 }
5902 }
5903 else if ( VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK_IRET(pVmxTransient->uExitIntInfo)
5904 && uExitVector != X86_XCPT_DF
5905 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI))
5906 {
5907 /*
5908 * Execution of IRET caused this fault when NMI blocking was in effect (i.e we're in the guest NMI handler).
5909 * We need to set the block-by-NMI field so that NMIs remain blocked until the IRET execution is restarted.
5910 * See Intel spec. 30.7.1.2 "Resuming guest software after handling an exception".
5911 */
5912 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
5913 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5914 }
5915
5916 Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET);
5917 return rc;
5918}
5919
5920
5921/**
5922 * Saves the guest's CR0 register from the VMCS into the guest-CPU context.
5923 *
5924 * @returns VBox status code.
5925 * @param pVCpu Pointer to the VMCPU.
5926 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5927 * out-of-sync. Make sure to update the required fields
5928 * before using them.
5929 *
5930 * @remarks No-long-jump zone!!!
5931 */
5932static int hmR0VmxSaveGuestCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5933{
5934 NOREF(pMixedCtx);
5935
5936 /*
5937 * While in the middle of saving guest-CR0, we could get preempted and re-invoked from the preemption hook,
5938 * see hmR0VmxLeave(). Safer to just make this code non-preemptible.
5939 */
5940 VMMRZCallRing3Disable(pVCpu);
5941 HM_DISABLE_PREEMPT_IF_NEEDED();
5942
5943 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0))
5944 {
5945 uint32_t uVal = 0;
5946 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uVal);
5947 AssertRCReturn(rc, rc);
5948
5949 uint32_t uShadow = 0;
5950 rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow);
5951 AssertRCReturn(rc, rc);
5952
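        /* Bits set in the CR0 guest/host mask are host-owned; their guest-visible values live in the
           CR0 read shadow. The remaining bits are guest-owned and read directly from the VMCS guest CR0. */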
5953 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR0Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR0Mask);
5954 CPUMSetGuestCR0(pVCpu, uVal);
5955 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0);
5956 }
5957
5958 HM_RESTORE_PREEMPT_IF_NEEDED();
5959 VMMRZCallRing3Enable(pVCpu);
5960 return VINF_SUCCESS;
5961}
5962
5963
5964/**
5965 * Saves the guest's CR4 register from the VMCS into the guest-CPU context.
5966 *
5967 * @returns VBox status code.
5968 * @param pVCpu Pointer to the VMCPU.
5969 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5970 * out-of-sync. Make sure to update the required fields
5971 * before using them.
5972 *
5973 * @remarks No-long-jump zone!!!
5974 */
5975static int hmR0VmxSaveGuestCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5976{
5977 NOREF(pMixedCtx);
5978
5979 int rc = VINF_SUCCESS;
5980 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4))
5981 {
5982 uint32_t uVal = 0;
5983 uint32_t uShadow = 0;
5984 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uVal);
5985 AssertRCReturn(rc, rc);
5986 rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow);
5987 AssertRCReturn(rc, rc);
5988
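        /* Merge the CR4 read shadow with the VMCS guest CR4 using the CR4 guest/host mask, the same
           scheme as for CR0 above. */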
5989 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR4Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR4Mask);
5990 CPUMSetGuestCR4(pVCpu, uVal);
5991 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4);
5992 }
5993 return rc;
5994}
5995
5996
5997/**
5998 * Saves the guest's RIP register from the VMCS into the guest-CPU context.
5999 *
6000 * @returns VBox status code.
6001 * @param pVCpu Pointer to the VMCPU.
6002 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6003 * out-of-sync. Make sure to update the required fields
6004 * before using them.
6005 *
6006 * @remarks No-long-jump zone!!!
6007 */
6008static int hmR0VmxSaveGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6009{
6010 int rc = VINF_SUCCESS;
6011 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP))
6012 {
6013 uint64_t u64Val = 0;
6014 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);
6015 AssertRCReturn(rc, rc);
6016
6017 pMixedCtx->rip = u64Val;
6018 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP);
6019 }
6020 return rc;
6021}
6022
6023
6024/**
6025 * Saves the guest's RSP register from the VMCS into the guest-CPU context.
6026 *
6027 * @returns VBox status code.
6028 * @param pVCpu Pointer to the VMCPU.
6029 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6030 * out-of-sync. Make sure to update the required fields
6031 * before using them.
6032 *
6033 * @remarks No-long-jump zone!!!
6034 */
6035static int hmR0VmxSaveGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6036{
6037 int rc = VINF_SUCCESS;
6038 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RSP))
6039 {
6040 uint64_t u64Val = 0;
6041 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);
6042 AssertRCReturn(rc, rc);
6043
6044 pMixedCtx->rsp = u64Val;
6045 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RSP);
6046 }
6047 return rc;
6048}
6049
6050
6051/**
6052 * Saves the guest's RFLAGS from the VMCS into the guest-CPU context.
6053 *
6054 * @returns VBox status code.
6055 * @param pVCpu Pointer to the VMCPU.
6056 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6057 * out-of-sync. Make sure to update the required fields
6058 * before using them.
6059 *
6060 * @remarks No-long-jump zone!!!
6061 */
6062static int hmR0VmxSaveGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6063{
6064 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS))
6065 {
6066 uint32_t uVal = 0;
6067 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &uVal);
6068 AssertRCReturn(rc, rc);
6069
6070 pMixedCtx->eflags.u32 = uVal;
6071 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) /* Undo our real-on-v86-mode changes to eflags if necessary. */
6072 {
6073 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
6074 Log4(("Saving real-mode EFLAGS VT-x view=%#RX32\n", pMixedCtx->eflags.u32));
6075
6076 pMixedCtx->eflags.Bits.u1VM = 0;
6077 pMixedCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL;
6078 }
6079
6080 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS);
6081 }
6082 return VINF_SUCCESS;
6083}
6084
6085
6086/**
6087 * Wrapper for saving the guest's RIP, RSP and RFLAGS from the VMCS into the
6088 * guest-CPU context.
6089 */
6090DECLINLINE(int) hmR0VmxSaveGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6091{
6092 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
6093 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
6094 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
6095 return rc;
6096}
6097
6098
6099/**
6100 * Saves the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
6101 * from the guest-state area in the VMCS.
6102 *
6103 * @param pVCpu Pointer to the VMCPU.
6104 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6105 * out-of-sync. Make sure to update the required fields
6106 * before using them.
6107 *
6108 * @remarks No-long-jump zone!!!
6109 */
6110static void hmR0VmxSaveGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6111{
6112 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_INTR_STATE))
6113 {
6114 uint32_t uIntrState = 0;
6115 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
6116 AssertRC(rc);
6117
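    /* Translate the VT-x interruptibility bits into VBox force-flags: MOV-SS/STI blocking maps to
       VMCPU_FF_INHIBIT_INTERRUPTS (anchored at the current RIP), NMI blocking maps to
       VMCPU_FF_BLOCK_NMIS; clear the force-flags when the corresponding bits are no longer set. */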
6118 if (!uIntrState)
6119 {
6120 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
6121 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6122
6123 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6124 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
6125 }
6126 else
6127 {
6128 if (uIntrState & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS
6129 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI))
6130 {
6131 rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
6132 AssertRC(rc);
6133 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* for hmR0VmxGetGuestIntrState(). */
6134 AssertRC(rc);
6135
6136 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
6137 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
6138 }
6139 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
6140 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6141
6142 if (uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI)
6143 {
6144 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6145 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
6146 }
6147 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6148 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
6149 }
6150
6151 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_INTR_STATE);
6152 }
6153}
6154
6155
6156/**
6157 * Saves the guest's activity state.
6158 *
6159 * @returns VBox status code.
6160 * @param pVCpu Pointer to the VMCPU.
6161 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6162 * out-of-sync. Make sure to update the required fields
6163 * before using them.
6164 *
6165 * @remarks No-long-jump zone!!!
6166 */
6167static int hmR0VmxSaveGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6168{
6169 NOREF(pMixedCtx);
6170 /* Nothing to do for now until we make use of different guest-CPU activity state. Just update the flag. */
6171 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_ACTIVITY_STATE);
6172 return VINF_SUCCESS;
6173}
6174
6175
6176/**
6177 * Saves the guest SYSENTER MSRs (SYSENTER_CS, SYSENTER_EIP, SYSENTER_ESP) from
6178 * the current VMCS into the guest-CPU context.
6179 *
6180 * @returns VBox status code.
6181 * @param pVCpu Pointer to the VMCPU.
6182 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6183 * out-of-sync. Make sure to update the required fields
6184 * before using them.
6185 *
6186 * @remarks No-long-jump zone!!!
6187 */
6188static int hmR0VmxSaveGuestSysenterMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6189{
6190 int rc = VINF_SUCCESS;
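    /* SYSENTER_CS is a 32-bit VMCS field while SYSENTER_EIP/ESP are natural-width guest fields,
       hence the different VMCS read helpers below. */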
6191 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR))
6192 {
6193 uint32_t u32Val = 0;
6194 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRCReturn(rc, rc);
6195 pMixedCtx->SysEnter.cs = u32Val;
6196 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR);
6197 }
6198
6199 uint64_t u64Val = 0;
6200 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR))
6201 {
6202 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &u64Val); AssertRCReturn(rc, rc);
6203 pMixedCtx->SysEnter.eip = u64Val;
6204 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR);
6205 }
6206 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR))
6207 {
6208 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &u64Val); AssertRCReturn(rc, rc);
6209 pMixedCtx->SysEnter.esp = u64Val;
6210 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR);
6211 }
6212 return rc;
6213}
6214
6215
6216/**
6217 * Saves the set of guest MSRs (that we restore lazily while leaving VT-x) from
6218 * the CPU back into the guest-CPU context.
6219 *
6220 * @returns VBox status code.
6221 * @param pVCpu Pointer to the VMCPU.
6222 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6223 * out-of-sync. Make sure to update the required fields
6224 * before using them.
6225 *
6226 * @remarks No-long-jump zone!!!
6227 */
6228static int hmR0VmxSaveGuestLazyMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6229{
6230#if HC_ARCH_BITS == 64
6231 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
6232 {
6233 /* Since this can be called from our preemption hook it's safer to make the guest-MSRs update non-preemptible. */
6234 VMMRZCallRing3Disable(pVCpu);
6235 HM_DISABLE_PREEMPT_IF_NEEDED();
6236
6237 /* Doing the check here ensures we don't overwrite already-saved guest MSRs from a preemption hook. */
6238 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS))
6239 {
6240 hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx);
6241 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
6242 }
6243
6244 HM_RESTORE_PREEMPT_IF_NEEDED();
6245 VMMRZCallRing3Enable(pVCpu);
6246 }
6247 else
6248 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
6249#else
6250 NOREF(pMixedCtx);
6251 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
6252#endif
6253
6254 return VINF_SUCCESS;
6255}
6256
6257
6258/**
6259 * Saves the auto load/store'd guest MSRs from the current VMCS into
6260 * the guest-CPU context.
6261 *
6262 * @returns VBox status code.
6263 * @param pVCpu Pointer to the VMCPU.
6264 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6265 * out-of-sync. Make sure to update the required fields
6266 * before using them.
6267 *
6268 * @remarks No-long-jump zone!!!
6269 */
6270static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6271{
6272 if (HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS))
6273 return VINF_SUCCESS;
6274
6275 PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
6276 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
6277 Log4(("hmR0VmxSaveGuestAutoLoadStoreMsrs: cMsrs=%u\n", cMsrs));
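    /* The auto-load/store area also serves as the VM-exit MSR-store area, so by the time we get here
       the CPU has written the current guest values of these MSRs into it; copy them back into the
       guest-CPU context (TSC_AUX goes to CPUM directly). */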
6278 for (uint32_t i = 0; i < cMsrs; i++, pMsr++)
6279 {
6280 switch (pMsr->u32Msr)
6281 {
6282 case MSR_K8_TSC_AUX: CPUMR0SetGuestTscAux(pVCpu, pMsr->u64Value); break;
6283 case MSR_K8_LSTAR: pMixedCtx->msrLSTAR = pMsr->u64Value; break;
6284 case MSR_K6_STAR: pMixedCtx->msrSTAR = pMsr->u64Value; break;
6285 case MSR_K8_SF_MASK: pMixedCtx->msrSFMASK = pMsr->u64Value; break;
6286 case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value; break;
6287 case MSR_K6_EFER: /* Nothing to do here since we intercept writes, see hmR0VmxLoadGuestMsrs(). */
6288 break;
6289
6290 default:
6291 {
6292 AssertMsgFailed(("Unexpected MSR in auto-load/store area. uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr, cMsrs));
6293 pVCpu->hm.s.u32HMError = pMsr->u32Msr;
6294 return VERR_HM_UNEXPECTED_LD_ST_MSR;
6295 }
6296 }
6297 }
6298
6299 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS);
6300 return VINF_SUCCESS;
6301}
6302
6303
6304/**
6305 * Saves the guest control registers from the current VMCS into the guest-CPU
6306 * context.
6307 *
6308 * @returns VBox status code.
6309 * @param pVCpu Pointer to the VMCPU.
6310 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6311 * out-of-sync. Make sure to update the required fields
6312 * before using them.
6313 *
6314 * @remarks No-long-jump zone!!!
6315 */
6316static int hmR0VmxSaveGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6317{
6318 /* Guest CR0. Guest FPU. */
6319 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6320 AssertRCReturn(rc, rc);
6321
6322 /* Guest CR4. */
6323 rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
6324 AssertRCReturn(rc, rc);
6325
6326 /* Guest CR2 - always updated during the world-switch or in #PF. */
6327 /* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */
6328 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR3))
6329 {
6330 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
6331 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4));
6332
6333 PVM pVM = pVCpu->CTX_SUFF(pVM);
6334 if ( pVM->hm.s.vmx.fUnrestrictedGuest
6335 || ( pVM->hm.s.fNestedPaging
6336 && CPUMIsGuestPagingEnabledEx(pMixedCtx)))
6337 {
6338 uint64_t u64Val = 0;
6339 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);
 AssertRCReturn(rc, rc);
6340 if (pMixedCtx->cr3 != u64Val)
6341 {
6342 CPUMSetGuestCR3(pVCpu, u64Val);
6343 if (VMMRZCallRing3IsEnabled(pVCpu))
6344 {
6345 PGMUpdateCR3(pVCpu, u64Val);
6346 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6347 }
6348 else
6349 {
6350 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3().*/
6351 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
6352 }
6353 }
6354
6355 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
6356 if (CPUMIsGuestInPAEModeEx(pMixedCtx)) /* Reads CR0, CR4 and EFER MSR (EFER is always up-to-date). */
6357 {
6358 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
6359 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
6360 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
6361 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
6362
6363 if (VMMRZCallRing3IsEnabled(pVCpu))
6364 {
6365 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6366 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6367 }
6368 else
6369 {
6370 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */
6371 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
6372 }
6373 }
6374 }
6375
6376 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR3);
6377 }
6378
6379 /*
6380 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback()
6381 * -> VMMRZCallRing3Disable() -> hmR0VmxSaveGuestState() -> Set VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
6382 * -> continue with VM-exit handling -> hmR0VmxSaveGuestControlRegs() and here we are.
6383 *
6384 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
6385 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
6386 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
6387 * -NOT- check if HMVMX_UPDATED_GUEST_CR3 is already set or not!
6388 *
6389 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
6390 */
6391 if (VMMRZCallRing3IsEnabled(pVCpu))
6392 {
6393 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
6394 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
6395
6396 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
6397 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6398
6399 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6400 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6401 }
6402
6403 return rc;
6404}
6405
6406
6407/**
6408 * Reads a guest segment register from the current VMCS into the guest-CPU
6409 * context.
6410 *
6411 * @returns VBox status code.
6412 * @param pVCpu Pointer to the VMCPU.
6413 * @param idxSel Index of the selector in the VMCS.
6414 * @param idxLimit Index of the segment limit in the VMCS.
6415 * @param idxBase Index of the segment base in the VMCS.
6416 * @param idxAccess Index of the access rights of the segment in the VMCS.
6417 * @param pSelReg Pointer to the segment selector.
6418 *
6419 * @remarks No-long-jump zone!!!
6420 * @remarks Never call this function directly!!! Use the VMXLOCAL_READ_SEG()
6421 * macro as that takes care of whether to read from the VMCS cache or
6422 * not.
6423 */
6424DECLINLINE(int) hmR0VmxReadSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
6425 PCPUMSELREG pSelReg)
6426{
6427 NOREF(pVCpu);
6428
6429 uint32_t u32Val = 0;
6430 int rc = VMXReadVmcs32(idxSel, &u32Val);
6431 AssertRCReturn(rc, rc);
6432 pSelReg->Sel = (uint16_t)u32Val;
6433 pSelReg->ValidSel = (uint16_t)u32Val;
6434 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6435
6436 rc = VMXReadVmcs32(idxLimit, &u32Val);
6437 AssertRCReturn(rc, rc);
6438 pSelReg->u32Limit = u32Val;
6439
6440 uint64_t u64Val = 0;
6441 rc = VMXReadVmcsGstNByIdxVal(idxBase, &u64Val);
6442 AssertRCReturn(rc, rc);
6443 pSelReg->u64Base = u64Val;
6444
6445 rc = VMXReadVmcs32(idxAccess, &u32Val);
6446 AssertRCReturn(rc, rc);
6447 pSelReg->Attr.u = u32Val;
6448
6449 /*
6450 * If VT-x marks the segment as unusable, most other bits remain undefined:
6451 * - For CS the L, D and G bits have meaning.
6452 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
6453 * - For the remaining data segments no bits are defined.
6454 *
6455 * The present bit and the unusable bit have been observed to be set at the
6456 * same time (the selector was supposed to be invalid as we started executing
6457 * a V8086 interrupt in ring-0).
6458 *
6459 * What should be important for the rest of the VBox code, is that the P bit is
6460 * cleared. Some of the other VBox code recognizes the unusable bit, but
6461 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
6462 * safe side here, we'll strip off P and other bits we don't care about. If
6463 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
6464 *
6465 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
6466 */
6467 if (pSelReg->Attr.u & X86DESCATTR_UNUSABLE)
6468 {
6469 Assert(idxSel != VMX_VMCS16_GUEST_FIELD_TR); /* TR is the only selector that can never be unusable. */
6470
6471 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
6472 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
6473 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
6474
6475 Log4(("hmR0VmxReadSegmentReg: Unusable idxSel=%#x attr=%#x -> %#x\n", idxSel, u32Val, pSelReg->Attr.u));
6476#ifdef DEBUG_bird
6477 AssertMsg((u32Val & ~X86DESCATTR_P) == pSelReg->Attr.u,
6478 ("%#x: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
6479 idxSel, u32Val, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
6480#endif
6481 }
6482 return VINF_SUCCESS;
6483}
6484
6485
6486#ifdef VMX_USE_CACHED_VMCS_ACCESSES
6487# define VMXLOCAL_READ_SEG(Sel, CtxSel) \
6488 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
6489 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
6490#else
6491# define VMXLOCAL_READ_SEG(Sel, CtxSel) \
6492 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
6493 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
6494#endif
6495
6496
6497/**
6498 * Saves the guest segment registers from the current VMCS into the guest-CPU
6499 * context.
6500 *
6501 * @returns VBox status code.
6502 * @param pVCpu Pointer to the VMCPU.
6503 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6504 * out-of-sync. Make sure to update the required fields
6505 * before using them.
6506 *
6507 * @remarks No-long-jump zone!!!
6508 */
6509static int hmR0VmxSaveGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6510{
6511 /* Guest segment registers. */
6512 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS))
6513 {
6514 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); AssertRCReturn(rc, rc);
6515 rc = VMXLOCAL_READ_SEG(CS, cs); AssertRCReturn(rc, rc);
6516 rc = VMXLOCAL_READ_SEG(SS, ss); AssertRCReturn(rc, rc);
6517 rc = VMXLOCAL_READ_SEG(DS, ds); AssertRCReturn(rc, rc);
6518 rc = VMXLOCAL_READ_SEG(ES, es); AssertRCReturn(rc, rc);
6519 rc = VMXLOCAL_READ_SEG(FS, fs); AssertRCReturn(rc, rc);
6520 rc = VMXLOCAL_READ_SEG(GS, gs); AssertRCReturn(rc, rc);
6521
6522 /* Restore segment attributes for real-on-v86 mode hack. */
6523 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6524 {
6525 pMixedCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u;
6526 pMixedCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u;
6527 pMixedCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u;
6528 pMixedCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u;
6529 pMixedCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u;
6530 pMixedCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u;
6531 }
6532 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS);
6533 }
6534
6535 return VINF_SUCCESS;
6536}
6537
6538
6539/**
6540 * Saves the guest descriptor table registers and task register from the current
6541 * VMCS into the guest-CPU context.
6542 *
6543 * @returns VBox status code.
6544 * @param pVCpu Pointer to the VMCPU.
6545 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6546 * out-of-sync. Make sure to update the required fields
6547 * before using them.
6548 *
6549 * @remarks No-long-jump zone!!!
6550 */
6551static int hmR0VmxSaveGuestTableRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6552{
6553 int rc = VINF_SUCCESS;
6554
6555 /* Guest LDTR. */
6556 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR))
6557 {
6558 rc = VMXLOCAL_READ_SEG(LDTR, ldtr);
6559 AssertRCReturn(rc, rc);
6560 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR);
6561 }
6562
6563 /* Guest GDTR. */
6564 uint64_t u64Val = 0;
6565 uint32_t u32Val = 0;
6566 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR))
6567 {
6568 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val); AssertRCReturn(rc, rc);
6569 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
6570 pMixedCtx->gdtr.pGdt = u64Val;
6571 pMixedCtx->gdtr.cbGdt = u32Val;
6572 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR);
6573 }
6574
6575 /* Guest IDTR. */
6576 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR))
6577 {
6578 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val); AssertRCReturn(rc, rc);
6579 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
6580 pMixedCtx->idtr.pIdt = u64Val;
6581 pMixedCtx->idtr.cbIdt = u32Val;
6582 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR);
6583 }
6584
6585 /* Guest TR. */
6586 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_TR))
6587 {
6588 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6589 AssertRCReturn(rc, rc);
6590
6591 /* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR, don't save the fake one. */
6592 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6593 {
6594 rc = VMXLOCAL_READ_SEG(TR, tr);
6595 AssertRCReturn(rc, rc);
6596 }
6597 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_TR);
6598 }
6599 return rc;
6600}
6601
6602#undef VMXLOCAL_READ_SEG
6603
6604
6605/**
6606 * Saves the guest debug-register DR7 from the current VMCS into the guest-CPU
6607 * context.
6608 *
6609 * @returns VBox status code.
6610 * @param pVCpu Pointer to the VMCPU.
6611 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6612 * out-of-sync. Make sure to update the required fields
6613 * before using them.
6614 *
6615 * @remarks No-long-jump zone!!!
6616 */
6617static int hmR0VmxSaveGuestDR7(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6618{
6619 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_DEBUG))
6620 {
6621 if (!pVCpu->hm.s.fUsingHyperDR7)
6622 {
6623 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
6624 uint32_t u32Val;
6625 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val); AssertRCReturn(rc, rc);
6626 pMixedCtx->dr[7] = u32Val;
6627 }
6628
6629 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_DEBUG);
6630 }
6631 return VINF_SUCCESS;
6632}
6633
6634
6635/**
6636 * Saves the guest APIC state from the current VMCS into the guest-CPU context.
6637 *
6638 * @returns VBox status code.
6639 * @param pVCpu Pointer to the VMCPU.
6640 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6641 * out-of-sync. Make sure to update the required fields
6642 * before using them.
6643 *
6644 * @remarks No-long-jump zone!!!
6645 */
6646static int hmR0VmxSaveGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6647{
6648 NOREF(pMixedCtx);
6649
6650 /* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. */
6651 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_APIC_STATE);
6652 return VINF_SUCCESS;
6653}
6654
6655
6656/**
6657 * Saves the entire guest state from the currently active VMCS into the
6658 * guest-CPU context. This essentially VMREADs all guest-data.
6659 *
6660 * @returns VBox status code.
6661 * @param pVCpu Pointer to the VMCPU.
6662 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6663 * out-of-sync. Make sure to update the required fields
6664 * before using them.
6665 */
6666static int hmR0VmxSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6667{
6668 Assert(pVCpu);
6669 Assert(pMixedCtx);
6670
6671 if (HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL)
6672 return VINF_SUCCESS;
6673
6674 /* Though we can longjmp to ring-3 due to log-flushes here and get recalled
6675 again on the ring-3 callback path, there is no real need to. */
6676 if (VMMRZCallRing3IsEnabled(pVCpu))
6677 VMMR0LogFlushDisable(pVCpu);
6678 else
6679 Assert(VMMR0IsLogFlushDisabled(pVCpu));
6680 Log4Func(("vcpu[%RU32]\n", pVCpu->idCpu));
6681
6682 int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
6683 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestRipRspRflags failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6684
6685 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
6686 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6687
6688 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
6689 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6690
6691 rc = hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
6692 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestTableRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6693
6694 rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
6695 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDR7 failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6696
6697 rc = hmR0VmxSaveGuestSysenterMsrs(pVCpu, pMixedCtx);
6698 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6699
6700 rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
6701 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestLazyMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6702
6703 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
6704 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6705
6706 rc = hmR0VmxSaveGuestActivityState(pVCpu, pMixedCtx);
6707 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6708
6709 rc = hmR0VmxSaveGuestApicState(pVCpu, pMixedCtx);
6710 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestApicState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6711
6712 AssertMsg(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL,
6713 ("Missed guest state bits while saving state; residue %RX32\n", HMVMXCPU_GST_VALUE(pVCpu)));
6714
6715 if (VMMRZCallRing3IsEnabled(pVCpu))
6716 VMMR0LogFlushEnable(pVCpu);
6717
6718 return rc;
6719}
6720
6721
6722/**
6723 * Check per-VM and per-VCPU force flag actions that require us to go back to
6724 * ring-3 for one reason or another.
6725 *
6726 * @returns VBox status code (informational status codes included).
6727 * @retval VINF_SUCCESS if we don't have any actions that require going back to
6728 * ring-3.
6729 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
6730 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
6731 * interrupts).
6732 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
6733 * all EMTs to be in ring-3.
6734 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
6735 * @retval VINF_EM_NO_MEMORY if PGM is out of memory and we need to return
6736 * to the EM loop.
6737 *
6738 * @param pVM Pointer to the VM.
6739 * @param pVCpu Pointer to the VMCPU.
6740 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6741 * out-of-sync. Make sure to update the required fields
6742 * before using them.
6743 */
6744static int hmR0VmxCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6745{
6746 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6747
6748 if ( VM_FF_IS_PENDING(pVM, !pVCpu->hm.s.fSingleInstruction
6749 ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
6750 || VMCPU_FF_IS_PENDING(pVCpu, !pVCpu->hm.s.fSingleInstruction
6751 ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
6752 {
6753 /* We need the control registers now, make sure the guest-CPU context is updated. */
6754 int rc3 = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
6755 AssertRCReturn(rc3, rc3);
6756
6757 /* Pending HM CR3 sync. */
6758 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
6759 {
6760 int rc2 = PGMUpdateCR3(pVCpu, pMixedCtx->cr3);
6761 AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
6762 ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
6763 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6764 }
6765
6766 /* Pending HM PAE PDPEs. */
6767 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
6768 {
6769 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6770 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6771 }
6772
6773 /* Pending PGM CR3 sync. */
6774 if (VMCPU_FF_IS_PENDING(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
6775 {
6776 int rc2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4,
6777 VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
6778 if (rc2 != VINF_SUCCESS)
6779 {
6780 AssertRC(rc2);
6781 Log4(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", rc2));
6782 return rc2;
6783 }
6784 }
6785
6786 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
6787 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
6788 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
6789 {
6790 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
6791 int rc2 = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
6792 Log4(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2));
6793 return rc2;
6794 }
6795
6796 /* Pending VM request packets, such as hardware interrupts. */
6797 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
6798 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
6799 {
6800 Log4(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
6801 return VINF_EM_PENDING_REQUEST;
6802 }
6803
6804 /* Pending PGM pool flushes. */
6805 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
6806 {
6807 Log4(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
6808 return VINF_PGM_POOL_FLUSH_PENDING;
6809 }
6810
6811 /* Pending DMA requests. */
6812 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
6813 {
6814 Log4(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
6815 return VINF_EM_RAW_TO_R3;
6816 }
6817 }
6818
6819 return VINF_SUCCESS;
6820}
6821
6822
6823/**
6824 * Converts any TRPM trap into a pending HM event. This is typically used when
6825 * entering from ring-3 (not longjmp returns).
6826 *
6827 * @param pVCpu Pointer to the VMCPU.
6828 */
6829static void hmR0VmxTrpmTrapToPendingEvent(PVMCPU pVCpu)
6830{
6831 Assert(TRPMHasTrap(pVCpu));
6832 Assert(!pVCpu->hm.s.Event.fPending);
6833
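    /* Pull the trap details out of TRPM, repack them in the VM-entry interruption-information format
       and queue them as an HM pending event; the TRPM trap itself is reset further down. */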
6834 uint8_t uVector;
6835 TRPMEVENT enmTrpmEvent;
6836 RTGCUINT uErrCode;
6837 RTGCUINTPTR GCPtrFaultAddress;
6838 uint8_t cbInstr;
6839
6840 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
6841 AssertRC(rc);
6842
6843 /* Refer Intel spec. 24.8.3 "VM-entry Controls for Event Injection" for the format of u32IntInfo. */
6844 uint32_t u32IntInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
6845 if (enmTrpmEvent == TRPM_TRAP)
6846 {
6847 switch (uVector)
6848 {
6849 case X86_XCPT_NMI:
6850 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6851 break;
6852
6853 case X86_XCPT_BP:
6854 case X86_XCPT_OF:
6855 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6856 break;
6857
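            /* These hardware exceptions push an error code, so mark the error code as valid for injection. */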
6858 case X86_XCPT_PF:
6859 case X86_XCPT_DF:
6860 case X86_XCPT_TS:
6861 case X86_XCPT_NP:
6862 case X86_XCPT_SS:
6863 case X86_XCPT_GP:
6864 case X86_XCPT_AC:
6865 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6866 /* no break! */
6867 default:
6868 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6869 break;
6870 }
6871 }
6872 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
6873 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6874 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
6875 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6876 else
6877 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
6878
6879 rc = TRPMResetTrap(pVCpu);
6880 AssertRC(rc);
6881 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
6882 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
6883
6884 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
6885 STAM_COUNTER_DEC(&pVCpu->hm.s.StatInjectPendingReflect);
6886}
6887
6888
6889/**
6890 * Converts any pending HM event into a TRPM trap. Typically used when leaving
6891 * VT-x to execute any instruction.
6892 *
6893 * @param pVCpu Pointer to the VMCPU.
6894 */
6895static void hmR0VmxPendingEventToTrpmTrap(PVMCPU pVCpu)
6896{
6897 Assert(pVCpu->hm.s.Event.fPending);
6898
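    /* This is the reverse of hmR0VmxTrpmTrapToPendingEvent(): decompose the pending HM event and hand
       it to TRPM, forwarding the #PF fault address and the instruction length for software events. */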
6899 uint32_t uVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
6900 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo);
6901 bool fErrorCodeValid = VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntInfo);
6902 uint32_t uErrorCode = pVCpu->hm.s.Event.u32ErrCode;
6903
6904 /* If a trap was already pending, we did something wrong! */
6905 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
6906
6907 TRPMEVENT enmTrapType;
6908 switch (uVectorType)
6909 {
6910 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
6911 enmTrapType = TRPM_HARDWARE_INT;
6912 break;
6913
6914 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
6915 enmTrapType = TRPM_SOFTWARE_INT;
6916 break;
6917
6918 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
6919 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
6920 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP and #OF */
6921 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
6922 enmTrapType = TRPM_TRAP;
6923 break;
6924
6925 default:
6926 AssertMsgFailed(("Invalid trap type %#x\n", uVectorType));
6927 enmTrapType = TRPM_32BIT_HACK;
6928 break;
6929 }
6930
6931 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
6932
6933 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
6934 AssertRC(rc);
6935
6936 if (fErrorCodeValid)
6937 TRPMSetErrorCode(pVCpu, uErrorCode);
6938
6939 if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6940 && uVector == X86_XCPT_PF)
6941 {
6942 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
6943 }
6944 else if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6945 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6946 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6947 {
6948 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6949 || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
6950 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
6951 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
6952 }
6953 pVCpu->hm.s.Event.fPending = false;
6954}
6955
6956
6957/**
6958 * Does the necessary state syncing before returning to ring-3 for any reason
6959 * (longjmp, preemption, voluntary exits to ring-3) from VT-x.
6960 *
6961 * @returns VBox status code.
6962 * @param pVM Pointer to the VM.
6963 * @param pVCpu Pointer to the VMCPU.
6964 * @param pMixedCtx Pointer to the guest-CPU context. The data may
6965 * be out-of-sync. Make sure to update the required
6966 * fields before using them.
6967 * @param fSaveGuestState Whether to save the guest state or not.
6968 *
6969 * @remarks No-long-jmp zone!!!
6970 */
6971static int hmR0VmxLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fSaveGuestState)
6972{
6973 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6974 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6975
6976 RTCPUID idCpu = RTMpCpuId();
6977 Log4Func(("HostCpuId=%u\n", idCpu));
6978
6979 /*
6980 * !!! IMPORTANT !!!
6981 * If you modify code here, make sure to check whether hmR0VmxCallRing3Callback() needs to be updated too.
6982 */
6983
6984 /* Save the guest state if necessary. */
6985 if ( fSaveGuestState
6986 && HMVMXCPU_GST_VALUE(pVCpu) != HMVMX_UPDATED_GUEST_ALL)
6987 {
6988 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
6989 AssertRCReturn(rc, rc);
6990 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
6991 }
6992
6993 /* Restore host FPU state if necessary and resync on next R0 reentry. */
6994 if (CPUMIsGuestFPUStateActive(pVCpu))
6995 {
6996 /* We shouldn't reload CR0 without saving it first. */
6997 if (!fSaveGuestState)
6998 {
6999 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7000 AssertRCReturn(rc, rc);
7001 }
7002 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
7003 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
7004 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
7005 }
7006
7007 /* Restore host debug registers if necessary and resync on next R0 reentry. */
7008#ifdef VBOX_STRICT
7009 if (CPUMIsHyperDebugStateActive(pVCpu))
7010 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
7011#endif
7012 if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */))
7013 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
7014 Assert(!CPUMIsGuestDebugStateActive(pVCpu) && !CPUMIsGuestDebugStateActivePending(pVCpu));
7015 Assert(!CPUMIsHyperDebugStateActive(pVCpu) && !CPUMIsHyperDebugStateActivePending(pVCpu));
7016
7017#if HC_ARCH_BITS == 64
7018 /* Restore host-state bits that VT-x only restores partially. */
7019 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
7020 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
7021 {
7022 Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags, idCpu));
7023 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
7024 }
7025 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
7026#endif
7027
7028#if HC_ARCH_BITS == 64
7029 /* Restore the host MSRs as we're leaving VT-x context. */
7030 if ( pVM->hm.s.fAllow64BitGuests
7031 && pVCpu->hm.s.vmx.fRestoreHostMsrs)
7032 {
7033 /* We shouldn't reload the guest MSRs without saving it first. */
7034 if (!fSaveGuestState)
7035 {
7036 int rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
7037 AssertRCReturn(rc, rc);
7038 }
7039 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS));
7040 hmR0VmxLazyRestoreHostMsrs(pVCpu);
7041 Assert(!pVCpu->hm.s.vmx.fRestoreHostMsrs);
7042 }
7043#endif
7044
7045 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
7046 pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
7047
7048 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
7049 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
7050 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
7051 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
7052 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitIO);
7053 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitMovCRx);
7054 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitXcptNmi);
7055 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
7056
7057 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
7058
7059 /** @todo This partially defeats the purpose of having preemption hooks.
7060 * The problem is, deregistering the hooks should be moved to a place that
7061 * lasts until the EMT is about to be destroyed, not every time we leave HM
7062 * context.
7063 */
7064 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
7065 {
7066 int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
7067 AssertRCReturn(rc, rc);
7068
7069 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
7070 Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
7071 }
7072 Assert(!(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED));
7073 NOREF(idCpu);
7074
7075 return VINF_SUCCESS;
7076}
7077
7078
7079/**
7080 * Leaves the VT-x session.
7081 *
7082 * @returns VBox status code.
7083 * @param pVM Pointer to the VM.
7084 * @param pVCpu Pointer to the VMCPU.
7085 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7086 * out-of-sync. Make sure to update the required fields
7087 * before using them.
7088 *
7089 * @remarks No-long-jmp zone!!!
7090 */
7091DECLINLINE(int) hmR0VmxLeaveSession(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7092{
7093 HM_DISABLE_PREEMPT_IF_NEEDED();
7094 HMVMX_ASSERT_CPU_SAFE();
7095 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
7096 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7097
7098 /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
7099 and done this from the VMXR0ThreadCtxCallback(). */
7100 if (!pVCpu->hm.s.fLeaveDone)
7101 {
7102 int rc2 = hmR0VmxLeave(pVM, pVCpu, pMixedCtx, true /* fSaveGuestState */);
7103 AssertRCReturnStmt(rc2, HM_RESTORE_PREEMPT_IF_NEEDED(), rc2);
7104 pVCpu->hm.s.fLeaveDone = true;
7105 }
7106 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
7107
7108 /*
7109 * !!! IMPORTANT !!!
7110 * If you modify code here, make sure to check whether hmR0VmxCallRing3Callback() needs to be updated too.
7111 */
7112
7113 /* Deregister hook now that we've left HM context before re-enabling preemption. */
7114 /** @todo This is bad. Deregistering here means we need to VMCLEAR always
7115 * (longjmp/exit-to-r3) in VT-x which is not efficient. */
7116 if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
7117 VMMR0ThreadCtxHooksDeregister(pVCpu);
7118
7119 /* Leave HM context. This takes care of local init (term). */
7120 int rc = HMR0LeaveCpu(pVCpu);
7121
7122 HM_RESTORE_PREEMPT_IF_NEEDED();
7123
7124 return rc;
7125}
7126
7127
7128/**
7129 * Does the necessary state syncing before doing a longjmp to ring-3.
7130 *
7131 * @returns VBox status code.
7132 * @param pVM Pointer to the VM.
7133 * @param pVCpu Pointer to the VMCPU.
7134 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7135 * out-of-sync. Make sure to update the required fields
7136 * before using them.
7137 *
7138 * @remarks No-long-jmp zone!!!
7139 */
7140DECLINLINE(int) hmR0VmxLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7141{
7142 return hmR0VmxLeaveSession(pVM, pVCpu, pMixedCtx);
7143}
7144
7145
7146/**
7147 * Takes necessary actions before going back to ring-3.
7148 *
7149 * An action requires us to go back to ring-3. This function does the necessary
7150 * steps before we can safely return to ring-3. This is not the same as longjmps
7151 * to ring-3, this is voluntary and prepares the guest so it may continue
7152 * executing outside HM (recompiler/IEM).
7153 *
7154 * @returns VBox status code.
7155 * @param pVM Pointer to the VM.
7156 * @param pVCpu Pointer to the VMCPU.
7157 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7158 * out-of-sync. Make sure to update the required fields
7159 * before using them.
7160 * @param rcExit The reason for exiting to ring-3. Can be
7161 * VINF_VMM_UNKNOWN_RING3_CALL.
7162 */
7163static int hmR0VmxExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)
7164{
7165 Assert(pVM);
7166 Assert(pVCpu);
7167 Assert(pMixedCtx);
7168 HMVMX_ASSERT_PREEMPT_SAFE();
7169
7170 if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
7171 {
7172 VMXGetActivatedVmcs(&pVCpu->hm.s.vmx.LastError.u64VMCSPhys);
7173 pVCpu->hm.s.vmx.LastError.u32VMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs;
7174 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
7175 /* LastError.idCurrentCpu was updated in hmR0VmxPreRunGuestCommitted(). */
7176 }
7177
7178 /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
7179 VMMRZCallRing3Disable(pVCpu);
7180 Log4(("hmR0VmxExitToRing3: pVCpu=%p idCpu=%RU32 rcExit=%d\n", pVCpu, pVCpu->idCpu, rcExit));
7181
7182 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring-3. */
7183 if (pVCpu->hm.s.Event.fPending)
7184 {
7185 hmR0VmxPendingEventToTrpmTrap(pVCpu);
7186 Assert(!pVCpu->hm.s.Event.fPending);
7187 }
7188
7189 /* Save guest state and restore host state bits. */
7190 int rc = hmR0VmxLeaveSession(pVM, pVCpu, pMixedCtx);
7191 AssertRCReturn(rc, rc);
7192 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
7193
7194 /* Sync recompiler state. */
7195 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
7196 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
7197 | CPUM_CHANGED_LDTR
7198 | CPUM_CHANGED_GDTR
7199 | CPUM_CHANGED_IDTR
7200 | CPUM_CHANGED_TR
7201 | CPUM_CHANGED_HIDDEN_SEL_REGS);
7202 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
7203 if ( pVM->hm.s.fNestedPaging
7204 && CPUMIsGuestPagingEnabledEx(pMixedCtx))
7205 {
7206 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
7207 }
7208
7209 Assert(!pVCpu->hm.s.fClearTrapFlag);
7210
7211 /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
7212 if (rcExit != VINF_EM_RAW_INTERRUPT)
7213 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
7214
7215 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
7216
7217 /* We do -not- want any longjmp notifications after this! We must return to ring-3 ASAP. */
7218 VMMRZCallRing3RemoveNotification(pVCpu);
7219 VMMRZCallRing3Enable(pVCpu);
7220
7221 return rc;
7222}
7223
7224
7225/**
7226 * VMMRZCallRing3() callback wrapper which saves the guest state before we
7227 * longjump to ring-3 and possibly get preempted.
7228 *
7229 * @returns VBox status code.
7230 * @param pVCpu Pointer to the VMCPU.
7231 * @param enmOperation The operation causing the ring-3 longjump.
7232 * @param pvUser Opaque pointer to the guest-CPU context. The data
7233 * may be out-of-sync. Make sure to update the required
7234 * fields before using them.
7235 */
7236DECLCALLBACK(int) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
7237{
7238 if (enmOperation == VMMCALLRING3_VM_R0_ASSERTION)
7239 {
7240 /*
7241 * !!! IMPORTANT !!!
7242 * If you modify code here, make sure to check whether hmR0VmxLeave() and hmR0VmxLeaveSession() needs
7243 * to be updated too. This is a stripped down version which gets out ASAP trying to not trigger any assertion.
7244 */
7245 VMMRZCallRing3RemoveNotification(pVCpu);
7246 VMMRZCallRing3Disable(pVCpu);
7247 HM_DISABLE_PREEMPT_IF_NEEDED();
7248
7249 PVM pVM = pVCpu->CTX_SUFF(pVM);
7250 if (CPUMIsGuestFPUStateActive(pVCpu))
7251 CPUMR0SaveGuestFPU(pVM, pVCpu, (PCPUMCTX)pvUser);
7252
7253 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
7254
7255#if HC_ARCH_BITS == 64
7256 /* Restore host-state bits that VT-x only restores partially. */
7257 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
7258 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
7259 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
7260 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
7261
7262 /* Restore the host MSRs as we're leaving VT-x context. */
7263 if ( pVM->hm.s.fAllow64BitGuests
7264 && pVCpu->hm.s.vmx.fRestoreHostMsrs)
7265 {
7266 hmR0VmxLazyRestoreHostMsrs(pVCpu);
7267 }
7268#endif
7269 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
7270 pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
7271 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
7272 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
7273 {
7274 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
7275 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
7276 }
7277
7278 if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
7279 VMMR0ThreadCtxHooksDeregister(pVCpu);
7280
7281 HMR0LeaveCpu(pVCpu);
7282 HM_RESTORE_PREEMPT_IF_NEEDED();
7283 return VINF_SUCCESS;
7284 }
7285
7286 Assert(pVCpu);
7287 Assert(pvUser);
7288 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7289 HMVMX_ASSERT_PREEMPT_SAFE();
7290
7291 VMMRZCallRing3Disable(pVCpu);
7292 Assert(VMMR0IsLogFlushDisabled(pVCpu));
7293
7294 Log4(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3 pVCpu=%p idCpu=%RU32\n enmOperation=%d", pVCpu, pVCpu->idCpu,
7295 enmOperation));
7296
7297 int rc = hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser);
7298 AssertRCReturn(rc, rc);
7299
7300 VMMRZCallRing3Enable(pVCpu);
7301 return VINF_SUCCESS;
7302}
7303
7304
7305/**
7306 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
7307 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
7308 *
7309 * @param pVCpu Pointer to the VMCPU.
7310 */
7311DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
7312{
7313 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
7314 {
7315 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
7316 {
7317 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
7318 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7319 AssertRC(rc);
7320 Log4(("Setup interrupt-window exiting\n"));
7321 }
7322 } /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. */
7323}
7324
7325
7326/**
7327 * Clears the interrupt-window exiting control in the VMCS.
7328 *
7329 * @param pVCpu Pointer to the VMCPU.
7330 */
7331DECLINLINE(void) hmR0VmxClearIntWindowExitVmcs(PVMCPU pVCpu)
7332{
7333 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
7334 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
7335 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7336 AssertRC(rc);
7337 Log4(("Cleared interrupt-window exiting\n"));
7338}
7339
7340
7341/**
7342 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
7343 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
7344 *
7345 * @param pVCpu Pointer to the VMCPU.
7346 */
7347DECLINLINE(void) hmR0VmxSetNmiWindowExitVmcs(PVMCPU pVCpu)
7348{
7349 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
7350 {
7351 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
7352 {
7353 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;
7354 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7355 AssertRC(rc);
7356 Log4(("Setup NMI-window exiting\n"));
7357 }
7358 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
7359}
7360
7361
7362/**
7363 * Clears the NMI-window exiting control in the VMCS.
7364 *
7365 * @param pVCpu Pointer to the VMCPU.
7366 */
7367DECLINLINE(void) hmR0VmxClearNmiWindowExitVmcs(PVMCPU pVCpu)
7368{
7369 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT);
7370 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;
7371 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7372 AssertRC(rc);
7373 Log4(("Cleared NMI-window exiting\n"));
7374}
7375
7376
7377/**
7378 * Evaluates the event to be delivered to the guest and sets it as the pending
7379 * event.
7380 *
7381 * @param pVCpu Pointer to the VMCPU.
7382 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7383 * out-of-sync. Make sure to update the required fields
7384 * before using them.
7385 */
7386static void hmR0VmxEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7387{
7388 Assert(!pVCpu->hm.s.Event.fPending);
7389
7390 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
7391 uint32_t const uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
7392 bool const fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7393 bool const fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7394 bool const fBlockNmi = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
7395
7396 Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
7397 Assert(!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
7398 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
7399 Assert(!TRPMHasTrap(pVCpu));
7400
7401 /*
7402 * Toggling of interrupt force-flags here is safe since we update TRPM on premature exits
7403 * to ring-3 before executing guest code, see hmR0VmxExitToRing3(). We must NOT restore these force-flags.
7404 */
7405 /** @todo SMI. SMIs take priority over NMIs. */
7406 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
7407 {
7408 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
7409 if ( !fBlockNmi
7410 && !fBlockSti
7411 && !fBlockMovSS)
7412 {
7413 Log4(("Pending NMI vcpu[%RU32]\n", pVCpu->idCpu));
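            /* Layout of the VM-entry interruption-information field (Intel spec. 24.8.3): bits 7:0 hold the vector,
               bits 10:8 the event type, bit 11 the deliver-error-code flag and bit 31 the valid bit. Hence the
               vector is OR'ed in directly and the type is shifted into place below. */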
7414 uint32_t u32IntInfo = X86_XCPT_NMI | VMX_EXIT_INTERRUPTION_INFO_VALID;
7415 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7416
7417 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7418 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
7419 }
7420 else
7421 hmR0VmxSetNmiWindowExitVmcs(pVCpu);
7422 }
7423 /*
7424 * Check if the guest can receive external interrupts (PIC/APIC). Once we do PDMGetInterrupt() we -must- deliver
7425 * the interrupt ASAP. We must not execute any guest code until we inject the interrupt.
7426 */
7427 else if ( VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
7428 && !pVCpu->hm.s.fSingleInstruction)
7429 {
7430 int rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7431 AssertRC(rc);
7432 bool const fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
7433 if ( !fBlockInt
7434 && !fBlockSti
7435 && !fBlockMovSS)
7436 {
7437 uint8_t u8Interrupt;
7438 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
7439 if (RT_SUCCESS(rc))
7440 {
7441 Log4(("Pending interrupt vcpu[%RU32] u8Interrupt=%#x \n", pVCpu->idCpu, u8Interrupt));
7442 uint32_t u32IntInfo = u8Interrupt | VMX_EXIT_INTERRUPTION_INFO_VALID;
7443 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7444
7445 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7446 }
7447 else
7448 {
7449 /** @todo Does this actually happen? If not turn it into an assertion. */
7450 Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
7451 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
7452 }
7453 }
7454 else
7455 hmR0VmxSetIntWindowExitVmcs(pVCpu);
7456 }
7457}
7458
7459
7460/**
7461 * Sets a pending-debug exception to be delivered to the guest if the guest is
7462 * single-stepping.
7463 *
7464 * @param pVCpu Pointer to the VMCPU.
7465 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7466 * out-of-sync. Make sure to update the required fields
7467 * before using them.
7468 */
7469DECLINLINE(void) hmR0VmxSetPendingDebugXcpt(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7470{
7471 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
7472 if (pMixedCtx->eflags.Bits.u1TF) /* We don't have any IA32_DEBUGCTL MSR for guests. Treat as all bits 0. */
7473 {
7474 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS);
7475 AssertRC(rc);
7476 }
7477}
7478
7479
7480/**
7481 * Injects any pending events into the guest if the guest is in a state to
7482 * receive them.
7483 *
7484 * @returns VBox status code (informational status codes included).
7485 * @param pVCpu Pointer to the VMCPU.
7486 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7487 * out-of-sync. Make sure to update the required fields
7488 * before using them.
7489 */
7490static int hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7491{
7492 HMVMX_ASSERT_PREEMPT_SAFE();
7493 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7494
7495 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
7496 uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
7497 bool fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7498 bool fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7499
7500 Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
7501 Assert(!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
7502 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
7503 Assert(!TRPMHasTrap(pVCpu));
7504
7505 int rc = VINF_SUCCESS;
7506 if (pVCpu->hm.s.Event.fPending)
7507 {
7508 /*
7509 * Clear any interrupt-window exiting control if we're going to inject an interrupt. Saves one extra
7510 * VM-exit in situations where we previously setup interrupt-window exiting but got other VM-exits and
7511 * VM-exit in situations where we previously set up interrupt-window exiting but got other VM-exits and
7512 */
7513 uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
7514 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT)
7515 && uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
7516 {
7517 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
7518 hmR0VmxClearIntWindowExitVmcs(pVCpu);
7519 }
7520
7521#ifdef VBOX_STRICT
7522 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
7523 {
7524 bool const fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
7525 Assert(!fBlockInt);
7526 Assert(!fBlockSti);
7527 Assert(!fBlockMovSS);
7528 }
7529 else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
7530 {
7531 bool fBlockNmi = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
7532 Assert(!fBlockSti);
7533 Assert(!fBlockMovSS);
7534 Assert(!fBlockNmi);
7535 }
7536#endif
7537 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#x\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
7538 (uint8_t)uIntType));
7539 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,
7540 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, &uIntrState);
7541 AssertRCReturn(rc, rc);
7542
7543 /* Update the interruptibility-state as it could have been changed by
7544 hmR0VmxInjectEventVmcs() (e.g. real-on-v86 guest injecting software interrupts) */
7545 fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7546 fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7547
7548#ifdef VBOX_WITH_STATISTICS
7549 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
7550 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
7551 else
7552 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
7553#endif
7554 }
7555
7556 /* Deliver pending debug exception if the guest is single-stepping. Evaluate and set the BS bit. */
7557 if ( fBlockSti
7558 || fBlockMovSS)
7559 {
7560 if ( !pVCpu->hm.s.fSingleInstruction
7561 && !DBGFIsStepping(pVCpu))
7562 {
7563 /*
7564 * The pending-debug exceptions field is cleared on all VM-exits except VMX_EXIT_TPR_BELOW_THRESHOLD,
7565 * VMX_EXIT_MTF, VMX_EXIT_APIC_WRITE and VMX_EXIT_VIRTUALIZED_EOI.
7566 * See Intel spec. 27.3.4 "Saving Non-Register State".
7567 */
7568 int rc2 = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7569 AssertRCReturn(rc2, rc2);
7570 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
7571 }
7572 else if (pMixedCtx->eflags.Bits.u1TF)
7573 {
7574 /*
7575 * We are single-stepping in the hypervisor debugger using EFLAGS.TF. Clear interrupt inhibition as setting the
7576 * BS bit would mean delivering a #DB to the guest upon VM-entry when it shouldn't be.
7577 */
7578 Assert(!(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG));
7579 uIntrState = 0;
7580 }
7581 }
7582
7583 /*
7584 * There's no need to clear the VM-entry interruption-information field here if we're not injecting anything.
7585 * VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7586 */
7587 int rc2 = hmR0VmxLoadGuestIntrState(pVCpu, uIntrState);
7588 AssertRC(rc2);
7589
7590 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
7591 NOREF(fBlockMovSS); NOREF(fBlockSti);
7592 return rc;
7593}
7594
7595
7596/**
7597 * Sets an invalid-opcode (#UD) exception as pending-for-injection into the VM.
7598 *
7599 * @param pVCpu Pointer to the VMCPU.
7600 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7601 * out-of-sync. Make sure to update the required fields
7602 * before using them.
7603 */
7604DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7605{
7606 NOREF(pMixedCtx);
7607 uint32_t u32IntInfo = X86_XCPT_UD | VMX_EXIT_INTERRUPTION_INFO_VALID;
7608 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7609}
7610
7611
7612/**
7613 * Injects a double-fault (#DF) exception into the VM.
7614 *
7615 * @returns VBox status code (informational status code included).
7616 * @param pVCpu Pointer to the VMCPU.
7617 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7618 * out-of-sync. Make sure to update the required fields
7619 * before using them.
 * @param puIntrState Pointer to the current guest interruptibility-state.
7620 */
7621DECLINLINE(int) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t *puIntrState)
7622{
7623 uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
7624 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7625 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7626 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */,
7627 puIntrState);
7628}
7629
7630
7631/**
7632 * Sets a debug (#DB) exception as pending-for-injection into the VM.
7633 *
7634 * @param pVCpu Pointer to the VMCPU.
7635 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7636 * out-of-sync. Make sure to update the required fields
7637 * before using them.
7638 */
7639DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7640{
7641 NOREF(pMixedCtx);
7642 uint32_t u32IntInfo = X86_XCPT_DB | VMX_EXIT_INTERRUPTION_INFO_VALID;
7643 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7644 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7645}
7646
7647
7648/**
7649 * Sets an overflow (#OF) exception as pending-for-injection into the VM.
7650 *
7651 * @param pVCpu Pointer to the VMCPU.
7652 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7653 * out-of-sync. Make sure to update the required fields
7654 * before using them.
7655 * @param cbInstr The instruction length in bytes; the return address pushed
7656 * on the guest stack is the current RIP plus this length.
7657 */
7658DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
7659{
7660 NOREF(pMixedCtx);
7661 uint32_t u32IntInfo = X86_XCPT_OF | VMX_EXIT_INTERRUPTION_INFO_VALID;
7662 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7663 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7664}
7665
7666
7667/**
7668 * Injects a general-protection (#GP) fault into the VM.
7669 *
7670 * @returns VBox status code (informational status code included).
7671 * @param pVCpu Pointer to the VMCPU.
7672 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7673 * out-of-sync. Make sure to update the required fields
7674 * before using them.
7675 * @param fErrorCodeValid Whether the error code is valid (depends on the CPU
7676 * mode, i.e. in real-mode it's not valid).
7677 * @param u32ErrorCode The error code associated with the #GP.
 * @param puIntrState Pointer to the current guest interruptibility-state.
7678 */
7679DECLINLINE(int) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
7680 uint32_t *puIntrState)
7681{
7682 uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
7683 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7684 if (fErrorCodeValid)
7685 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7686 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */,
7687 puIntrState);
7688}
7689
7690
7691/**
7692 * Sets a general-protection (#GP) exception as pending-for-injection into the
7693 * VM.
7694 *
7695 * @param pVCpu Pointer to the VMCPU.
7696 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7697 * out-of-sync. Make sure to update the required fields
7698 * before using them.
7699 * @param u32ErrorCode The error code associated with the #GP.
7700 */
7701DECLINLINE(void) hmR0VmxSetPendingXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t u32ErrorCode)
7702{
7703 NOREF(pMixedCtx);
7704 uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
7705 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7706 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7707 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */);
7708}
7709
7710
7711/**
7712 * Sets a software interrupt (INTn) as pending-for-injection into the VM.
7713 *
7714 * @param pVCpu Pointer to the VMCPU.
7715 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7716 * out-of-sync. Make sure to update the required fields
7717 * before using them.
7718 * @param uVector The software interrupt vector number.
7719 * @param cbInstr The instruction length in bytes; the return address pushed
7720 * on the guest stack is the current RIP plus this length.
7721 */
7722DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr)
7723{
7724 NOREF(pMixedCtx);
7725 uint32_t u32IntInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
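    /* INT3 (#BP) and INTO (#OF) are injected with the software-exception event type, while all other INT n
       instructions are plain software interrupts. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection". */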
7726 if ( uVector == X86_XCPT_BP
7727 || uVector == X86_XCPT_OF)
7728 {
7729 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7730 }
7731 else
7732 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7733 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7734}
7735
7736
7737/**
7738 * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
7739 * stack.
7740 *
7741 * @returns VBox status code (informational status codes included).
7742 * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
7743 * @param pVM Pointer to the VM.
7744 * @param pMixedCtx Pointer to the guest-CPU context.
7745 * @param uValue The value to push to the guest stack.
7746 */
7747DECLINLINE(int) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
7748{
7749 /*
7750 * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
7751 * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
7752 * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
7753 */
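    /* Note: with SP == 1 the 16-bit push below would straddle the segment wraparound point; this is treated
       as fatal here and we request a VM reset rather than attempting the write. */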
7754 if (pMixedCtx->sp == 1)
7755 return VINF_EM_RESET;
7756 pMixedCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */
7757 int rc = PGMPhysSimpleWriteGCPhys(pVM, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));
7758 AssertRCReturn(rc, rc);
7759 return rc;
7760}
7761
7762
7763/**
7764 * Injects an event into the guest upon VM-entry by updating the relevant fields
7765 * in the VM-entry area in the VMCS.
7766 *
7767 * @returns VBox status code (informational error codes included).
7768 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
7769 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
7770 *
7771 * @param pVCpu Pointer to the VMCPU.
7772 * @param pMixedCtx Pointer to the guest-CPU context. The data may
7773 * be out-of-sync. Make sure to update the required
7774 * fields before using them.
7775 * @param u64IntInfo The VM-entry interruption-information field.
7776 * @param cbInstr The VM-entry instruction length in bytes (for
7777 * software interrupts, exceptions and privileged
7778 * software exceptions).
7779 * @param u32ErrCode The VM-entry exception error code.
7780 * @param GCPtrFaultAddress The page-fault address for #PF exceptions.
7781 * @param puIntrState Pointer to the current guest interruptibility-state.
7782 * This interruptibility-state will be updated if
7783 * necessary. This cannot be NULL.
7784 *
7785 * @remarks Requires CR0!
7786 * @remarks No-long-jump zone!!!
7787 */
7788static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
7789 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntrState)
7790{
7791 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
7792 AssertMsg(u64IntInfo >> 32 == 0, ("%#RX64\n", u64IntInfo));
7793 Assert(puIntrState);
7794 uint32_t u32IntInfo = (uint32_t)u64IntInfo;
7795
7796 uint32_t const uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntInfo);
7797 uint32_t const uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo);
7798
7799#ifdef VBOX_STRICT
7800 /* Validate the error-code-valid bit for hardware exceptions. */
7801 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT)
7802 {
7803 switch (uVector)
7804 {
7805 case X86_XCPT_PF:
7806 case X86_XCPT_DF:
7807 case X86_XCPT_TS:
7808 case X86_XCPT_NP:
7809 case X86_XCPT_SS:
7810 case X86_XCPT_GP:
7811 case X86_XCPT_AC:
7812 AssertMsg(VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntInfo),
7813 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
7814 /* fallthru */
7815 default:
7816 break;
7817 }
7818 }
7819#endif
7820
7821 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
7822 Assert( uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
7823 || !(*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));
7824
7825 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
7826
7827 /* We require CR0 to check if the guest is in real-mode. */
7828 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7829 AssertRCReturn(rc, rc);
7830
7831 /*
7832 * Hardware interrupts & exceptions cannot be delivered through the software interrupt redirection bitmap to the real
7833 * mode task in virtual-8086 mode. We must jump to the interrupt handler in the (real-mode) guest.
7834 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode" for interrupt & exception classes.
7835 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
7836 */
7837 if (CPUMIsGuestInRealModeEx(pMixedCtx))
7838 {
7839 PVM pVM = pVCpu->CTX_SUFF(pVM);
7840 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
7841 {
7842 Assert(PDMVmmDevHeapIsEnabled(pVM));
7843 Assert(pVM->hm.s.vmx.pRealModeTSS);
7844
7845 /* We require RIP, RSP, RFLAGS, CS, IDTR. Save the required ones from the VMCS. */
7846 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7847 rc |= hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
7848 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
7849 AssertRCReturn(rc, rc);
7850 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP));
7851
7852 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
7853 size_t const cbIdtEntry = sizeof(X86IDTR16);
7854 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pMixedCtx->idtr.cbIdt)
7855 {
7856 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
7857 if (uVector == X86_XCPT_DF)
7858 return VINF_EM_RESET;
7859 else if (uVector == X86_XCPT_GP)
7860 {
7861 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
7862 return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, puIntrState);
7863 }
7864
7865 /* If we're injecting an interrupt/exception with no valid IDT entry, inject a general-protection fault. */
7866 /* No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling" */
7867 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */, puIntrState);
7868 }
7869
7870 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
7871 uint16_t uGuestIp = pMixedCtx->ip;
7872 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT)
7873 {
7874 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
7875 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
7876 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
7877 }
7878 else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT)
7879 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
7880
7881 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
7882 X86IDTR16 IdtEntry;
7883 RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pMixedCtx->idtr.pIdt + uVector * cbIdtEntry;
7884 rc = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
7885 AssertRCReturn(rc, rc);
7886
7887 /* Construct the stack frame for the interrupt/exception handler. */
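            /* The pushes below mirror real-mode event delivery: FLAGS first, then CS, then IP, so that the
               handler's IRET pops them back in the reverse order. See Intel spec. 20.1.4 "Interrupt and
               Exception Handling". */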
7888 rc = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
7889 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
7890 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, uGuestIp);
7891 AssertRCReturn(rc, rc);
7892
7893 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
7894 if (rc == VINF_SUCCESS)
7895 {
7896 pMixedCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
7897 pMixedCtx->rip = IdtEntry.offSel;
7898 pMixedCtx->cs.Sel = IdtEntry.uSel;
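                /* cbIdtEntry is sizeof(X86IDTR16), i.e. 4, so the shift below amounts to multiplying the
                   selector by 16, which is the usual real-mode segment-base calculation. */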
7899 pMixedCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
7900 if ( uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
7901 && uVector == X86_XCPT_PF)
7902 {
7903 pMixedCtx->cr2 = GCPtrFaultAddress;
7904 }
7905
7906 /* If any other guest-state bits are changed here, make sure to update
7907 hmR0VmxPreRunGuestCommitted() when thread-context hooks are used. */
7908 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS
7909 | HM_CHANGED_GUEST_RIP
7910 | HM_CHANGED_GUEST_RFLAGS
7911 | HM_CHANGED_GUEST_RSP);
7912
7913 /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */
7914 if (*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
7915 {
7916 Assert( uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
7917 && uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
7918 Log4(("Clearing inhibition due to STI.\n"));
7919 *puIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
7920 }
7921 Log4(("Injecting real-mode: u32IntInfo=%#x u32ErrCode=%#x instrlen=%#x\n", u32IntInfo, u32ErrCode, cbInstr));
7922
7923 /* The event has been truly dispatched. Mark it as no longer pending so we don't attempt to 'undo'
7924 it, if we are returning to ring-3 before executing guest code. */
7925 pVCpu->hm.s.Event.fPending = false;
7926 }
7927 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
7928 return rc;
7929 }
7930 else
7931 {
7932 /*
7933 * When unrestricted guest execution is enabled and the guest is in real mode, we must not set the deliver-error-code bit.
7934 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
7935 */
7936 u32IntInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7937 }
7938 }
7939
7940 /* Validate. */
7941 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
7942 Assert(!VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK_IRET(u32IntInfo)); /* Bit 12 MBZ. */
7943 Assert(!(u32IntInfo & 0x7ffff000)); /* Bits 30:12 MBZ. */
7944
7945 /* Inject. */
7946 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
7947 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntInfo))
7948 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
7949 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
7950
7951 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
7952 && uVector == X86_XCPT_PF)
7953 {
7954 pMixedCtx->cr2 = GCPtrFaultAddress;
7955 }
7956
7957 Log4(("Injecting vcpu[%RU32] u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x pMixedCtx->uCR2=%#RX64\n", pVCpu->idCpu,
7958 u32IntInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));
7959
7960 AssertRCReturn(rc, rc);
7961 return rc;
7962}
7963
7964
7965/**
7966 * Clears the interrupt-window exiting control in the VMCS and if necessary
7967 * clears the current event in the VMCS as well.
7968 *
7969 * @returns VBox status code.
7970 * @param pVCpu Pointer to the VMCPU.
7971 *
7972 * @remarks Use this function only to clear events that have not yet been
7973 * delivered to the guest but are injected in the VMCS!
7974 * @remarks No-long-jump zone!!!
7975 */
7976static void hmR0VmxClearEventVmcs(PVMCPU pVCpu)
7977{
7978 int rc;
7979 Log4Func(("vcpu[%d]\n", pVCpu->idCpu));
7980
7981 /* Clear interrupt-window exiting control. */
7982 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT)
7983 {
7984 hmR0VmxClearIntWindowExitVmcs(pVCpu);
7985 Assert(!pVCpu->hm.s.Event.fPending);
7986 }
7987
7988 /* Clear NMI-window exiting control. */
7989 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT)
7990 {
7991 hmR0VmxClearNmiWindowExitVmcs(pVCpu);
7992 Assert(!pVCpu->hm.s.Event.fPending);
7993 }
7994
7995 if (!pVCpu->hm.s.Event.fPending)
7996 return;
7997
7998#ifdef VBOX_STRICT
7999 uint32_t u32EntryInfo;
8000 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
8001 AssertRC(rc);
8002 Assert(VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo));
8003#endif
8004
8005 /* Clear the entry-interruption field (including the valid bit). */
8006 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0);
8007 AssertRC(rc);
8008
8009 /* Clear the pending debug exception field. */
8010 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
8011 AssertRC(rc);
8012
8013 /* We deliberately don't clear "hm.s.Event.fPending" here, it's taken
8014 care of in hmR0VmxExitToRing3() converting the pending event to TRPM. */
8015}
8016
8017
8018/**
8019 * Enters the VT-x session.
8020 *
8021 * @returns VBox status code.
8022 * @param pVM Pointer to the VM.
8023 * @param pVCpu Pointer to the VMCPU.
8024 * @param pCpu Pointer to the CPU info struct.
8025 */
8026VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
8027{
8028 AssertPtr(pVM);
8029 AssertPtr(pVCpu);
8030 Assert(pVM->hm.s.vmx.fSupported);
8031 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8032 NOREF(pCpu); NOREF(pVM);
8033
8034 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
8035 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
8036
8037#ifdef VBOX_STRICT
8038 /* Make sure we're in VMX root mode. */
8039 RTCCUINTREG u32HostCR4 = ASMGetCR4();
8040 if (!(u32HostCR4 & X86_CR4_VMXE))
8041 {
8042 LogRel(("VMXR0Enter: X86_CR4_VMXE bit in CR4 is not set!\n"));
8043 return VERR_VMX_X86_CR4_VMXE_CLEARED;
8044 }
8045#endif
8046
8047 /*
8048 * Load the VCPU's VMCS as the current (and active) one.
8049 */
8050 Assert(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR);
8051 int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
8052 if (RT_FAILURE(rc))
8053 return rc;
8054
8055 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
8056 pVCpu->hm.s.fLeaveDone = false;
8057 Log4Func(("Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
8058
8059 return VINF_SUCCESS;
8060}
8061
8062
8063/**
8064 * The thread-context callback (only on platforms which support it).
8065 *
8066 * @param enmEvent The thread-context event.
8067 * @param pVCpu Pointer to the VMCPU.
8068 * @param fGlobalInit Whether global VT-x/AMD-V init. was used.
8069 * @thread EMT(pVCpu)
8070 */
8071VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
8072{
8073 NOREF(fGlobalInit);
8074
8075 switch (enmEvent)
8076 {
8077 case RTTHREADCTXEVENT_PREEMPTING:
8078 {
8079 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8080 Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
8081 VMCPU_ASSERT_EMT(pVCpu);
8082
8083 PVM pVM = pVCpu->CTX_SUFF(pVM);
8084 PCPUMCTX pMixedCtx = CPUMQueryGuestCtxPtr(pVCpu);
8085
8086 /* No longjmps (logger flushes, locks) in this fragile context. */
8087 VMMRZCallRing3Disable(pVCpu);
8088 Log4Func(("Preempting: HostCpuId=%u\n", RTMpCpuId()));
8089
8090 /*
8091 * Restore host-state (FPU, debug etc.)
8092 */
8093 if (!pVCpu->hm.s.fLeaveDone)
8094 {
8095 /* Do -not- save guest-state here as we might already be in the middle of saving it (esp. bad if we are
8096 holding the PGM lock while saving the guest state, see hmR0VmxSaveGuestControlRegs()). */
8097 hmR0VmxLeave(pVM, pVCpu, pMixedCtx, false /* fSaveGuestState */);
8098 pVCpu->hm.s.fLeaveDone = true;
8099 }
8100
8101 /* Leave HM context, takes care of local init (term). */
8102 int rc = HMR0LeaveCpu(pVCpu);
8103 AssertRC(rc); NOREF(rc);
8104
8105 /* Restore longjmp state. */
8106 VMMRZCallRing3Enable(pVCpu);
8107 STAM_COUNTER_INC(&pVCpu->hm.s.StatPreemptPreempting);
8108 break;
8109 }
8110
8111 case RTTHREADCTXEVENT_RESUMED:
8112 {
8113 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8114 Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
8115 VMCPU_ASSERT_EMT(pVCpu);
8116
8117 /* No longjmps here, as we don't want to trigger preemption (& its hook) while resuming. */
8118 VMMRZCallRing3Disable(pVCpu);
8119 Log4Func(("Resumed: HostCpuId=%u\n", RTMpCpuId()));
8120
8121 /* Initialize the bare minimum state required for HM. This takes care of
8122 initializing VT-x if necessary (onlined CPUs, local init etc.) */
8123 int rc = HMR0EnterCpu(pVCpu);
8124 AssertRC(rc);
8125 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
8126
8127 /* Load the active VMCS as the current one. */
8128 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR)
8129 {
8130 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
8131 AssertRC(rc); NOREF(rc);
8132 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
8133 Log4Func(("Resumed: Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
8134 }
8135 pVCpu->hm.s.fLeaveDone = false;
8136
8137 /* Restore longjmp state. */
8138 VMMRZCallRing3Enable(pVCpu);
8139 break;
8140 }
8141
8142 default:
8143 break;
8144 }
8145}
8146
8147
8148/**
8149 * Saves the host state in the VMCS host-state.
8150 * Sets up the VM-exit MSR-load area.
8151 *
8152 * The CPU state will be loaded from these fields on every successful VM-exit.
8153 *
8154 * @returns VBox status code.
8155 * @param pVM Pointer to the VM.
8156 * @param pVCpu Pointer to the VMCPU.
8157 *
8158 * @remarks No-long-jump zone!!!
8159 */
8160static int hmR0VmxSaveHostState(PVM pVM, PVMCPU pVCpu)
8161{
8162 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8163
8164 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
8165 return VINF_SUCCESS;
8166
8167 int rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
8168 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8169
8170 rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
8171 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8172
8173 rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
8174 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8175
8176 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
8177 return rc;
8178}
8179
8180
8181/**
8182 * Saves the host state in the VMCS host-state.
8183 *
8184 * @returns VBox status code.
8185 * @param pVM Pointer to the VM.
8186 * @param pVCpu Pointer to the VMCPU.
8187 *
8188 * @remarks No-long-jump zone!!!
8189 */
8190VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
8191{
8192 AssertPtr(pVM);
8193 AssertPtr(pVCpu);
8194
8195 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
8196
8197 /* Save the host state here while entering HM context. When thread-context hooks are used, we might get preempted
8198 and have to resave the host state but most of the time we won't be, so do it here before we disable interrupts. */
8199 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8200 return hmR0VmxSaveHostState(pVM, pVCpu);
8201}
8202
8203
8204/**
8205 * Loads the guest state into the VMCS guest-state area. The CPU state will be
8206 * loaded from these fields on every successful VM-entry.
8207 *
8208 * Sets up the VM-entry MSR-load and VM-exit MSR-store areas.
8209 * Sets up the VM-entry controls.
8210 * Sets up the appropriate VMX non-root function to execute guest code based on
8211 * the guest CPU mode.
8212 *
8213 * @returns VBox status code.
8214 * @param pVM Pointer to the VM.
8215 * @param pVCpu Pointer to the VMCPU.
8216 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8217 * out-of-sync. Make sure to update the required fields
8218 * before using them.
8219 *
8220 * @remarks No-long-jump zone!!!
8221 */
8222static int hmR0VmxLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
8223{
8224 AssertPtr(pVM);
8225 AssertPtr(pVCpu);
8226 AssertPtr(pMixedCtx);
8227 HMVMX_ASSERT_PREEMPT_SAFE();
8228
8229#ifdef LOG_ENABLED
8230 /** @todo r=ramshankar: I'm not able to use VMMRZCallRing3Disable() here,
8231 * probably not initialized yet? Anyway this will do for now.
8232 *
8233 * Update: Should be possible once VMXR0LoadGuestState() is removed as an
8234 * interface and disable ring-3 calls when thread-context hooks are not
8235 * available. */
8236 bool fCallerDisabledLogFlush = VMMR0IsLogFlushDisabled(pVCpu);
8237 VMMR0LogFlushDisable(pVCpu);
8238#endif
8239
8240 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
8241
8242 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
8243
8244 /* Determine real-on-v86 mode. */
8245 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
8246 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
8247 && CPUMIsGuestInRealModeEx(pMixedCtx))
8248 {
8249 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
8250 }
8251
8252 /*
8253 * Load the guest-state into the VMCS.
8254 * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
8255 * Ideally, assert that the cross-dependent bits are up-to-date at the point of using it.
8256 */
8257 int rc = hmR0VmxSetupVMRunHandler(pVCpu, pMixedCtx);
8258 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8259
8260 /* This needs to be done after hmR0VmxSetupVMRunHandler() as changing pfnStartVM may require VM-entry control updates. */
8261 rc = hmR0VmxLoadGuestEntryCtls(pVCpu, pMixedCtx);
8262 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestEntryCtls! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8263
8264 /* This needs to be done after hmR0VmxSetupVMRunHandler() as changing pfnStartVM may require VM-exit control updates. */
8265 rc = hmR0VmxLoadGuestExitCtls(pVCpu, pMixedCtx);
8266 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupExitCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8267
8268 rc = hmR0VmxLoadGuestActivityState(pVCpu, pMixedCtx);
8269 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8270
8271 rc = hmR0VmxLoadGuestCR3AndCR4(pVCpu, pMixedCtx);
8272 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestCR3AndCR4: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8273
8274 /* Assumes pMixedCtx->cr0 is up-to-date (strict builds require CR0 for segment register validation checks). */
8275 rc = hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx);
8276 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8277
8278 /* This needs to be done after hmR0VmxLoadGuestEntryCtls() and hmR0VmxLoadGuestExitCtls() as it may alter controls if we
8279 determine we don't have to swap EFER after all. */
8280 rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
8281 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadSharedMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8282
8283 rc = hmR0VmxLoadGuestApicState(pVCpu, pMixedCtx);
8284 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8285
8286 /*
8287 * Loading Rflags here is fine, even though Rflags.TF might depend on guest debug state (which is not loaded here).
8288 * It is re-evaluated and updated if necessary in hmR0VmxLoadSharedState().
8289 */
8290 rc = hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx);
8291 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestRipRspRflags! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8292
8293 /* Clear any unused and reserved bits. */
8294 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);
8295
8296#ifdef LOG_ENABLED
8297 /* Only reenable log-flushing if the caller has it enabled. */
8298 if (!fCallerDisabledLogFlush)
8299 VMMR0LogFlushEnable(pVCpu);
8300#endif
8301
8302 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
8303 return rc;
8304}
8305
8306
8307/**
8308 * Loads the state shared between the host and guest into the VMCS.
8309 *
8310 * @param pVM Pointer to the VM.
8311 * @param pVCpu Pointer to the VMCPU.
8312 * @param pCtx Pointer to the guest-CPU context.
8313 *
8314 * @remarks No-long-jump zone!!!
8315 */
8316static void hmR0VmxLoadSharedState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
8317{
8318 NOREF(pVM);
8319
8320 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8321 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8322
8323 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
8324 {
8325 int rc = hmR0VmxLoadSharedCR0(pVCpu, pCtx);
8326 AssertRC(rc);
8327 }
8328
8329 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
8330 {
8331 int rc = hmR0VmxLoadSharedDebugState(pVCpu, pCtx);
8332 AssertRC(rc);
8333
8334 /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
8335 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
8336 {
8337 rc = hmR0VmxLoadGuestRflags(pVCpu, pCtx);
8338 AssertRC(rc);
8339 }
8340 }
8341
8342 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS))
8343 {
8344#if HC_ARCH_BITS == 64
8345 if (pVM->hm.s.fAllow64BitGuests)
8346 hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx);
8347#endif
8348 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
8349 }
8350
8351 AssertMsg(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE),
8352 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8353}
8354
8355
8356/**
8357 * Worker for loading the guest-state bits in the inner VT-x execution loop.
8358 *
8359 * @param pVM Pointer to the VM.
8360 * @param pVCpu Pointer to the VMCPU.
8361 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8362 * out-of-sync. Make sure to update the required fields
8363 * before using them.
8364 */
8365DECLINLINE(void) hmR0VmxLoadGuestStateOptimal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
8366{
8367 HMVMX_ASSERT_PREEMPT_SAFE();
8368
8369 Log5(("LoadFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8370#ifdef HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
8371 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
8372#endif
8373
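    /* Fast path: if only RIP has changed since the last VM-entry (the common case for VM-exits that merely
       advance the guest), reload just RIP instead of re-evaluating the entire guest state. */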
8374 if (HMCPU_CF_IS_SET_ONLY(pVCpu, HM_CHANGED_GUEST_RIP))
8375 {
8376 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
8377 AssertRC(rc);
8378 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
8379 }
8380 else if (HMCPU_CF_VALUE(pVCpu))
8381 {
8382 int rc = hmR0VmxLoadGuestState(pVM, pVCpu, pMixedCtx);
8383 AssertRC(rc);
8384 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
8385 }
8386
8387 /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */
8388 AssertMsg( !HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST)
8389 || HMCPU_CF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE),
8390 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8391}
8392
8393
8394/**
8395 * Does the preparations before executing guest code in VT-x.
8396 *
8397 * This may cause longjmps to ring-3 and may even result in rescheduling to the
8398 * recompiler. We must be cautious about what we commit to the VMCS here,
8399 * since we do so assuming the guest will assuredly be executed in VT-x mode.
8400 * If we fall back to the recompiler after updating the VMCS and clearing the
8401 * common state (TRPM/force-flags), we must undo those changes so that the
8402 * recompiler can (and should) use them when it resumes guest execution.
8403 * Otherwise, such operations must be deferred until we can no longer exit
8404 * to ring-3.
8405 *
8406 * @returns Strict VBox status code.
8407 * @retval VINF_SUCCESS if we can proceed with running the guest, interrupts
8408 * have been disabled.
8409 * @retval VINF_EM_RESET if a triple-fault occurs while injecting a
8410 * double-fault into the guest.
8411 * @retval VINF_* scheduling changes, we have to go back to ring-3.
8412 *
8413 * @param pVM Pointer to the VM.
8414 * @param pVCpu Pointer to the VMCPU.
8415 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8416 * out-of-sync. Make sure to update the required fields
8417 * before using them.
8418 * @param pVmxTransient Pointer to the VMX transient structure.
8419 */
8420static int hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8421{
8422 Assert(VMMRZCallRing3IsEnabled(pVCpu));
8423
8424#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
8425 PGMRZDynMapFlushAutoSet(pVCpu);
8426#endif
8427
8428 /* Check force flag actions that might require us to go back to ring-3. */
8429 int rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx);
8430 if (rc != VINF_SUCCESS)
8431 return rc;
8432
8433#ifndef IEM_VERIFICATION_MODE_FULL
8434 /* Set up the virtualized APIC accesses. pMixedCtx->msrApicBase is always up-to-date. It's not part of the VMCS. */
8435 if ( pVCpu->hm.s.vmx.u64MsrApicBase != pMixedCtx->msrApicBase
8436 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
8437 {
8438 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
8439 RTGCPHYS GCPhysApicBase;
8440 GCPhysApicBase = pMixedCtx->msrApicBase;
8441 GCPhysApicBase &= PAGE_BASE_GC_MASK;
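        /* Mask off the page-offset bits so we deal with the 4K-aligned base of the guest's APIC MMIO range. */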
8442
8443 /* Unalias any existing mapping. */
8444 rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
8445 AssertRCReturn(rc, rc);
8446
8447 /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
8448 Log4(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGv\n", GCPhysApicBase));
8449 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
8450 AssertRCReturn(rc, rc);
8451
8452 pVCpu->hm.s.vmx.u64MsrApicBase = pMixedCtx->msrApicBase;
8453 }
8454#endif /* !IEM_VERIFICATION_MODE_FULL */
8455
8456 if (TRPMHasTrap(pVCpu))
8457 hmR0VmxTrpmTrapToPendingEvent(pVCpu);
8458 else if (!pVCpu->hm.s.Event.fPending)
8459 hmR0VmxEvaluatePendingEvent(pVCpu, pMixedCtx);
8460
8461 /*
8462 * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus needs to be done with
8463 * longjmps or interrupts + preemption enabled. Event injection might also result in triple-faulting the VM.
8464 */
8465 rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx);
8466 if (RT_UNLIKELY(rc != VINF_SUCCESS))
8467 {
8468 Assert(rc == VINF_EM_RESET);
8469 return rc;
8470 }
8471
8472 /*
8473 * Load the guest state bits, we can handle longjmps/getting preempted here.
8474 *
8475 * If we are injecting events to a real-on-v86 mode guest, we will have to update
8476 * RIP and some segment registers, i.e. hmR0VmxInjectPendingEvent()->hmR0VmxInjectEventVmcs().
8477 * Hence, this needs to be done -after- injection of events.
8478 */
8479 hmR0VmxLoadGuestStateOptimal(pVM, pVCpu, pMixedCtx);
8480
8481 /*
8482 * No longjmps to ring-3 from this point on!!!
8483 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
8484 * This also disables flushing of the R0-logger instance (if any).
8485 */
8486 VMMRZCallRing3Disable(pVCpu);
8487
8488 /*
8489 * We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.)
8490 * when thread-context hooks aren't used and we've been running with preemption disabled for a while.
8491 *
8492 * We need to check for force-flags that could've possibly been altered since we last checked them (e.g.
8493 * by PDMGetInterrupt() leaving the PDM critical section, see @bugref{6398}).
8494 *
8495 * We also check a couple of other force-flags as a last opportunity to get the EMT back to ring-3 before
8496 * executing guest code.
8497 */
8498 pVmxTransient->uEflags = ASMIntDisableFlags();
8499 if ( VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
8500 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
8501 {
8502 hmR0VmxClearEventVmcs(pVCpu);
8503 ASMSetFlags(pVmxTransient->uEflags);
8504 VMMRZCallRing3Enable(pVCpu);
8505 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
8506 return VINF_EM_RAW_TO_R3;
8507 }
8508
8509 if (RTThreadPreemptIsPending(NIL_RTTHREAD))
8510 {
8511 hmR0VmxClearEventVmcs(pVCpu);
8512 ASMSetFlags(pVmxTransient->uEflags);
8513 VMMRZCallRing3Enable(pVCpu);
8514 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
8515 return VINF_EM_RAW_INTERRUPT;
8516 }
8517
8518 /* We've injected any pending events. This is really the point of no return (to ring-3). */
8519 pVCpu->hm.s.Event.fPending = false;
8520
8521 return VINF_SUCCESS;
8522}
8523
8524
8525/**
8526 * Prepares to run guest code in VT-x and we've committed to doing so. This
8527 * means there is no backing out to ring-3 or anywhere else at this
8528 * point.
8529 *
8530 * @param pVM Pointer to the VM.
8531 * @param pVCpu Pointer to the VMCPU.
8532 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8533 * out-of-sync. Make sure to update the required fields
8534 * before using them.
8535 * @param pVmxTransient Pointer to the VMX transient structure.
8536 *
8537 * @remarks Called with preemption disabled.
8538 * @remarks No-long-jump zone!!!
8539 */
8540static void hmR0VmxPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8541{
8542 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8543 Assert(VMMR0IsLogFlushDisabled(pVCpu));
8544 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8545
8546 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
8547 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC); /* Indicate the start of guest execution. */
8548
8549#ifdef HMVMX_ALWAYS_SWAP_FPU_STATE
8550 if (!CPUMIsGuestFPUStateActive(pVCpu))
8551 CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
8552 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
8553#endif
8554
8555 if ( pVCpu->hm.s.fUseGuestFpu
8556 && !CPUMIsGuestFPUStateActive(pVCpu))
8557 {
8558 CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
8559 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
8560 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
8561 }
8562
8563 /*
8564 * Lazy-update of the host MSRs values in the auto-load/store MSR area.
8565 */
8566 if ( !pVCpu->hm.s.vmx.fUpdatedHostMsrs
8567 && pVCpu->hm.s.vmx.cMsrs > 0)
8568 {
8569 hmR0VmxUpdateAutoLoadStoreHostMsrs(pVCpu);
8570 }
8571
8572 /*
8573 * Load the host state bits as we may've been preempted (only happens when
8574 * thread-context hooks are used or when hmR0VmxSetupVMRunHandler() changes pfnStartVM).
8575 */
8576 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
8577 {
8578 /* This ASSUMES that pfnStartVM has been set up already. */
8579 int rc = hmR0VmxSaveHostState(pVM, pVCpu);
8580 AssertRC(rc);
8581 STAM_COUNTER_INC(&pVCpu->hm.s.StatPreemptSaveHostState);
8582 }
8583 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT));
8584
8585 /*
8586 * Load the state shared between host and guest (FPU, debug, lazy MSRs).
8587 */
8588 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
8589 hmR0VmxLoadSharedState(pVM, pVCpu, pMixedCtx);
8590 AssertMsg(!HMCPU_CF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8591
8592 /* Store status of the shared guest-host state at the time of VM-entry. */
8593#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
8594 if (CPUMIsGuestInLongModeEx(pMixedCtx))
8595 {
8596 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
8597 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
8598 }
8599 else
8600#endif
8601 {
8602 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
8603 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
8604 }
8605 pVmxTransient->fWasGuestFPUStateActive = CPUMIsGuestFPUStateActive(pVCpu);
8606
8607 /*
8608 * Cache the TPR-shadow for checking on every VM-exit if it might have changed.
8609 */
8610 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
8611 pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[0x80];
8612
8613 PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
8614 RTCPUID idCurrentCpu = pCpu->idCpu;
8615 if ( pVmxTransient->fUpdateTscOffsettingAndPreemptTimer
8616 || idCurrentCpu != pVCpu->hm.s.idLastCpu)
8617 {
8618 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu);
8619 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
8620 }
8621
8622 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB-shootdowns, set this across the world switch. */
8623 hmR0VmxFlushTaggedTlb(pVCpu, pCpu); /* Invalidate the appropriate guest entries from the TLB. */
8624 Assert(idCurrentCpu == pVCpu->hm.s.idLastCpu);
8625 pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu; /* Update the error reporting info. with the current host CPU. */
8626
8627 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
8628
8629 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
8630 to start executing. */
8631
8632 /*
8633 * Load the TSC_AUX MSR when we are not intercepting RDTSCP.
8634 */
8635 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
8636 {
8637 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
8638 {
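            /* RDTSC/RDTSCP are not intercepted here, so the guest reads IA32_TSC_AUX directly; ensure the MSR
               holds the guest value across VM-entry by adding it to the auto-load/store MSR area. */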
8639 int rc2 = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
8640 AssertRC(rc2);
8641 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS));
8642 bool fMsrUpdated = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMR0GetGuestTscAux(pVCpu),
8643 true /* fUpdateHostMsr */);
8644 Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8645 /* Finally, mark that all host MSR values are updated so we don't redo it without leaving VT-x. See @bugref{6956}. */
8646 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
8647 }
8648 else
8649 {
8650 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX);
8651 Assert(!pVCpu->hm.s.vmx.cMsrs || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8652 }
8653 }
8654
8655#ifdef VBOX_STRICT
8656 hmR0VmxCheckAutoLoadStoreMsrs(pVCpu);
8657 hmR0VmxCheckHostEferMsr(pVCpu);
8658 AssertRC(hmR0VmxCheckVmcsCtls(pVCpu));
8659#endif
8660#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
8661 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVM, pVCpu, pMixedCtx);
8662 if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
8663 Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
8664#endif
8665}
8666
8667
8668/**
8669 * Performs some essential restoration of state after running guest code in
8670 * VT-x.
8671 *
8672 * @param pVM Pointer to the VM.
8673 * @param pVCpu Pointer to the VMCPU.
8674 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8675 * out-of-sync. Make sure to update the required fields
8676 * before using them.
8677 * @param pVmxTransient Pointer to the VMX transient structure.
8678 * @param rcVMRun Return code of VMLAUNCH/VMRESUME.
8679 *
8680 * @remarks Called with interrupts disabled, and returns with interrupts enabled!
8681 *
8682 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
8683 * unconditionally when it is safe to do so.
8684 */
8685static void hmR0VmxPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, int rcVMRun)
8686{
8687 NOREF(pVM);
8688
8689 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8690
8691 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
8692 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
8693 HMVMXCPU_GST_RESET_TO(pVCpu, 0); /* Exits/longjmps to ring-3 requires saving the guest state. */
8694 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */
8695 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
8696
8697 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
8698 {
8699 /** @todo Find a way to fix hardcoding a guestimate. */
8700 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC()
8701 + pVCpu->hm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);
8702 }
8703
8704 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
8705 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
8706 Assert(!(ASMGetFlags() & X86_EFL_IF));
8707 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
8708
8709#ifdef HMVMX_ALWAYS_SWAP_FPU_STATE
8710 if (CPUMIsGuestFPUStateActive(pVCpu))
8711 {
8712 hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8713 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
8714 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
8715 }
8716#endif
8717
8718#if HC_ARCH_BITS == 64
8719 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_REQUIRED; /* Host state messed up by VT-x, we must restore. */
8720#endif
8721 pVCpu->hm.s.vmx.uVmcsState |= HMVMX_VMCS_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */
8722#ifdef VBOX_STRICT
8723 hmR0VmxCheckHostEferMsr(pVCpu); /* Verify that VMRUN/VMLAUNCH didn't modify host EFER. */
8724#endif
8725 ASMSetFlags(pVmxTransient->uEflags); /* Enable interrupts. */
8726 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
8727
8728 /* Save the basic VM-exit reason. See Intel spec. 24.9.1 "Basic VM-exit Information". */
8729 uint32_t uExitReason;
8730 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
8731 rc |= hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
8732 AssertRC(rc);
8733 pVmxTransient->uExitReason = (uint16_t)VMX_EXIT_REASON_BASIC(uExitReason);
8734 pVmxTransient->fVMEntryFailed = VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uEntryIntInfo);
8735
8736 /* Update the VM-exit history array. */
8737 HMCPU_EXIT_HISTORY_ADD(pVCpu, pVmxTransient->uExitReason);
8738
8739 /* If the VMLAUNCH/VMRESUME failed, we can bail out early. This does -not- cover VMX_EXIT_ERR_*. */
8740 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
8741 {
8742 Log4(("VM-entry failure: pVCpu=%p idCpu=%RU32 rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", pVCpu, pVCpu->idCpu, rcVMRun,
8743 pVmxTransient->fVMEntryFailed));
8744 return;
8745 }
8746
8747 if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
8748 {
8749 /** @todo We can optimize this by only syncing with our force-flags when
8750 * really needed and keeping the VMCS state as it is for most
8751 * VM-exits. */
8752 /* Update the guest interruptibility-state from the VMCS. */
8753 hmR0VmxSaveGuestIntrState(pVCpu, pMixedCtx);
8754
8755#if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
8756 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8757 AssertRC(rc);
8758#elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
8759 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
8760 AssertRC(rc);
8761#endif
8762
8763 /*
8764 * If the TPR was raised by the guest, it wouldn't cause a VM-exit immediately. Instead we sync the TPR lazily whenever
8765 * we eventually get a VM-exit for any reason. This may be expensive as PDMApicSetTPR() can longjmp to ring-3, which is
8766 * why it's done here: it's easier and no less efficient to deal with it here than making hmR0VmxSaveGuestState()
8767 * cope with longjmps safely (see VMCPU_FF_HM_UPDATE_CR3 handling).
8768 */
8769 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
8770 && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[0x80])
8771 {
8772 rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
8773 AssertRC(rc);
8774 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
8775 }
8776 }
8777}
8778
8779
8780/**
8781 * Runs the guest code using VT-x the normal way.
8782 *
8783 * @returns VBox status code.
8784 * @param pVM Pointer to the VM.
8785 * @param pVCpu Pointer to the VMCPU.
8786 * @param pCtx Pointer to the guest-CPU context.
8787 *
8788 * @note Mostly the same as hmR0VmxRunGuestCodeStep().
8789 */
8790static int hmR0VmxRunGuestCodeNormal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
8791{
8792 VMXTRANSIENT VmxTransient;
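 /* Start with a forced update so the first world switch recomputes the TSC offset and the VMX-preemption timer. */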
8793 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
8794 int rc = VERR_INTERNAL_ERROR_5;
8795 uint32_t cLoops = 0;
8796
8797 for (;; cLoops++)
8798 {
8799 Assert(!HMR0SuspendPending());
8800 HMVMX_ASSERT_CPU_SAFE();
8801
8802 /* Preparatory work for running guest code; this may force us to return
8803 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
8804 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
8805 rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
8806 if (rc != VINF_SUCCESS)
8807 break;
8808
8809 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
8810 rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
8811 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
8812
8813 /* Restore any residual host-state and save any bits shared between host
8814 and guest into the guest-CPU state. Re-enables interrupts! */
8815 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
8816
8817 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
8818 if (RT_UNLIKELY(rc != VINF_SUCCESS))
8819 {
8820 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
8821 hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
8822 return rc;
8823 }
8824
8825 /* Handle the VM-exit. */
8826 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
8827 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
8828 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
8829 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
8830 HMVMX_START_EXIT_DISPATCH_PROF();
8831#ifdef HMVMX_USE_FUNCTION_TABLE
8832 rc = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
8833#else
8834 rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
8835#endif
8836 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
8837 if (rc != VINF_SUCCESS)
8838 break;
8839 else if (cLoops > pVM->hm.s.cMaxResumeLoops)
8840 {
8841 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
8842 rc = VINF_EM_RAW_INTERRUPT;
8843 break;
8844 }
8845 }
8846
8847 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
8848 return rc;
8849}
8850
8851
8852/**
8853 * Single steps guest code using VT-x.
8854 *
8855 * @returns VBox status code.
8856 * @param pVM Pointer to the VM.
8857 * @param pVCpu Pointer to the VMCPU.
8858 * @param pCtx Pointer to the guest-CPU context.
8859 *
8860 * @note Mostly the same as hmR0VmxRunGuestCodeNormal().
8861 */
8862static int hmR0VmxRunGuestCodeStep(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
8863{
8864 VMXTRANSIENT VmxTransient;
8865 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
8866 int rc = VERR_INTERNAL_ERROR_5;
8867 uint32_t cLoops = 0;
8868 uint16_t uCsStart = pCtx->cs.Sel;
8869 uint64_t uRipStart = pCtx->rip;
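 /* Single-stepping is detected by comparing CS:RIP against these starting values after each iteration of the
    loop below; any change is reported to the debugger as VINF_EM_DBG_STEPPED. */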
8870
8871 for (;; cLoops++)
8872 {
8873 Assert(!HMR0SuspendPending());
8874 HMVMX_ASSERT_CPU_SAFE();
8875
8876 /* Preparatory work for running guest code; this may force us to return
8877 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
8878 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
8879 rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
8880 if (rc != VINF_SUCCESS)
8881 break;
8882
8883 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
8884 rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
8885 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
8886
8887 /* Restore any residual host-state and save any bits shared between host
8888 and guest into the guest-CPU state. Re-enables interrupts! */
8889 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
8890
8891 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
8892 if (RT_UNLIKELY(rc != VINF_SUCCESS))
8893 {
8894 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
8895 hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
8896 return rc;
8897 }
8898
8899 /* Handle the VM-exit. */
8900 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
8901 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
8902 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
8903 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
8904 HMVMX_START_EXIT_DISPATCH_PROF();
8905#ifdef HMVMX_USE_FUNCTION_TABLE
8906 rc = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
8907#else
8908 rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
8909#endif
8910 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
8911 if (rc != VINF_SUCCESS)
8912 break;
8913 else if (cLoops > pVM->hm.s.cMaxResumeLoops)
8914 {
8915 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
8916 rc = VINF_EM_RAW_INTERRUPT;
8917 break;
8918 }
8919
8920 /*
8921 * Did the RIP change? If so, consider it a single step.
8922 * Otherwise, make sure one of the TFs gets set.
8923 */
8924 int rc2 = hmR0VmxSaveGuestRip(pVCpu, pCtx);
8925 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pCtx);
8926 AssertRCReturn(rc2, rc2);
8927 if ( pCtx->rip != uRipStart
8928 || pCtx->cs.Sel != uCsStart)
8929 {
8930 rc = VINF_EM_DBG_STEPPED;
8931 break;
8932 }
8933 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
8934 }
8935
8936 /*
8937 * Clear the X86_EFL_TF if necessary.
8938 */
8939 if (pVCpu->hm.s.fClearTrapFlag)
8940 {
8941 int rc2 = hmR0VmxSaveGuestRflags(pVCpu, pCtx);
8942 AssertRCReturn(rc2, rc2);
8943 pVCpu->hm.s.fClearTrapFlag = false;
8944 pCtx->eflags.Bits.u1TF = 0;
8945 }
8946 /** @todo there seem to be issues with the resume flag when the monitor trap
8947 * flag is pending without being used. Seen early in BIOS init when
8948 * accessing APIC page in protected mode. */
8949
8950 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
8951 return rc;
8952}
8953
8954
8955/**
8956 * Runs the guest code using VT-x.
8957 *
8958 * @returns VBox status code.
8959 * @param pVM Pointer to the VM.
8960 * @param pVCpu Pointer to the VMCPU.
8961 * @param pCtx Pointer to the guest-CPU context.
8962 */
8963VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
8964{
8965 Assert(VMMRZCallRing3IsEnabled(pVCpu));
8966 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
8967 HMVMX_ASSERT_PREEMPT_SAFE();
8968
8969 VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pCtx);
8970
8971 int rc;
8972 if (!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu))
8973 rc = hmR0VmxRunGuestCodeNormal(pVM, pVCpu, pCtx);
8974 else
8975 rc = hmR0VmxRunGuestCodeStep(pVM, pVCpu, pCtx);
8976
8977 if (rc == VERR_EM_INTERPRETER)
8978 rc = VINF_EM_RAW_EMULATE_INSTR;
8979 else if (rc == VINF_EM_RESET)
8980 rc = VINF_EM_TRIPLE_FAULT;
8981
8982 int rc2 = hmR0VmxExitToRing3(pVM, pVCpu, pCtx, rc);
8983 if (RT_FAILURE(rc2))
8984 {
8985 pVCpu->hm.s.u32HMError = rc;
8986 rc = rc2;
8987 }
8988 Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
8989 return rc;
8990}
8991
8992
8993#ifndef HMVMX_USE_FUNCTION_TABLE
8994DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
8995{
8996#ifdef DEBUG_ramshankar
8997# define SVVMCS() do { int rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); AssertRC(rc2); } while (0)
8998# define LDVMCS() do { HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); } while (0)
8999#endif
9000 int rc;
9001 switch (rcReason)
9002 {
9003 case VMX_EXIT_EPT_MISCONFIG: /* SVVMCS(); */ rc = hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9004 case VMX_EXIT_EPT_VIOLATION: /* SVVMCS(); */ rc = hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9005 case VMX_EXIT_IO_INSTR: /* SVVMCS(); */ rc = hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9006 case VMX_EXIT_CPUID: /* SVVMCS(); */ rc = hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9007 case VMX_EXIT_RDTSC: /* SVVMCS(); */ rc = hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9008 case VMX_EXIT_RDTSCP: /* SVVMCS(); */ rc = hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9009 case VMX_EXIT_APIC_ACCESS: /* SVVMCS(); */ rc = hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9010 case VMX_EXIT_XCPT_OR_NMI: /* SVVMCS(); */ rc = hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9011 case VMX_EXIT_MOV_CRX: /* SVVMCS(); */ rc = hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9012 case VMX_EXIT_EXT_INT: /* SVVMCS(); */ rc = hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9013 case VMX_EXIT_INT_WINDOW: /* SVVMCS(); */ rc = hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9014 case VMX_EXIT_MWAIT: /* SVVMCS(); */ rc = hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9015 case VMX_EXIT_MONITOR: /* SVVMCS(); */ rc = hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9016 case VMX_EXIT_TASK_SWITCH: /* SVVMCS(); */ rc = hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9017 case VMX_EXIT_PREEMPT_TIMER: /* SVVMCS(); */ rc = hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9018 case VMX_EXIT_RDMSR: /* SVVMCS(); */ rc = hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9019 case VMX_EXIT_WRMSR: /* SVVMCS(); */ rc = hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9020 case VMX_EXIT_MOV_DRX: /* SVVMCS(); */ rc = hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9021 case VMX_EXIT_TPR_BELOW_THRESHOLD: /* SVVMCS(); */ rc = hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9022 case VMX_EXIT_HLT: /* SVVMCS(); */ rc = hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9023 case VMX_EXIT_INVD: /* SVVMCS(); */ rc = hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9024 case VMX_EXIT_INVLPG: /* SVVMCS(); */ rc = hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9025 case VMX_EXIT_RSM: /* SVVMCS(); */ rc = hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9026 case VMX_EXIT_MTF: /* SVVMCS(); */ rc = hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9027 case VMX_EXIT_PAUSE: /* SVVMCS(); */ rc = hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9028 case VMX_EXIT_XDTR_ACCESS: /* SVVMCS(); */ rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9029 case VMX_EXIT_TR_ACCESS: /* SVVMCS(); */ rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9030 case VMX_EXIT_WBINVD: /* SVVMCS(); */ rc = hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9031 case VMX_EXIT_XSETBV: /* SVVMCS(); */ rc = hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9032 case VMX_EXIT_RDRAND: /* SVVMCS(); */ rc = hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9033 case VMX_EXIT_INVPCID: /* SVVMCS(); */ rc = hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9034 case VMX_EXIT_GETSEC: /* SVVMCS(); */ rc = hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9035 case VMX_EXIT_RDPMC: /* SVVMCS(); */ rc = hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9036 case VMX_EXIT_VMCALL: /* SVVMCS(); */ rc = hmR0VmxExitVmcall(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9037
9038 case VMX_EXIT_TRIPLE_FAULT: rc = hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient); break;
9039 case VMX_EXIT_NMI_WINDOW: rc = hmR0VmxExitNmiWindow(pVCpu, pMixedCtx, pVmxTransient); break;
9040 case VMX_EXIT_INIT_SIGNAL: rc = hmR0VmxExitInitSignal(pVCpu, pMixedCtx, pVmxTransient); break;
9041 case VMX_EXIT_SIPI: rc = hmR0VmxExitSipi(pVCpu, pMixedCtx, pVmxTransient); break;
9042 case VMX_EXIT_IO_SMI: rc = hmR0VmxExitIoSmi(pVCpu, pMixedCtx, pVmxTransient); break;
9043 case VMX_EXIT_SMI: rc = hmR0VmxExitSmi(pVCpu, pMixedCtx, pVmxTransient); break;
9044 case VMX_EXIT_ERR_MSR_LOAD: rc = hmR0VmxExitErrMsrLoad(pVCpu, pMixedCtx, pVmxTransient); break;
9045 case VMX_EXIT_ERR_INVALID_GUEST_STATE: rc = hmR0VmxExitErrInvalidGuestState(pVCpu, pMixedCtx, pVmxTransient); break;
9046 case VMX_EXIT_ERR_MACHINE_CHECK: rc = hmR0VmxExitErrMachineCheck(pVCpu, pMixedCtx, pVmxTransient); break;
9047
9048 case VMX_EXIT_VMCLEAR:
9049 case VMX_EXIT_VMLAUNCH:
9050 case VMX_EXIT_VMPTRLD:
9051 case VMX_EXIT_VMPTRST:
9052 case VMX_EXIT_VMREAD:
9053 case VMX_EXIT_VMRESUME:
9054 case VMX_EXIT_VMWRITE:
9055 case VMX_EXIT_VMXOFF:
9056 case VMX_EXIT_VMXON:
9057 case VMX_EXIT_INVEPT:
9058 case VMX_EXIT_INVVPID:
9059 case VMX_EXIT_VMFUNC:
9060 rc = hmR0VmxExitSetPendingXcptUD(pVCpu, pMixedCtx, pVmxTransient);
9061 break;
9062 default:
9063 rc = hmR0VmxExitErrUndefined(pVCpu, pMixedCtx, pVmxTransient);
9064 break;
9065 }
9066 return rc;
9067}
9068#endif
9069
9070#ifdef DEBUG
9071/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
9072# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
9073 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
9074
9075# define HMVMX_ASSERT_PREEMPT_CPUID() \
9076 do \
9077 { \
9078 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
9079 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
9080 } while (0)
9081
9082# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \
9083 do { \
9084 AssertPtr(pVCpu); \
9085 AssertPtr(pMixedCtx); \
9086 AssertPtr(pVmxTransient); \
9087 Assert(pVmxTransient->fVMEntryFailed == false); \
9088 Assert(ASMIntAreEnabled()); \
9089 HMVMX_ASSERT_PREEMPT_SAFE(); \
9090 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
9091 Log4Func(("vcpu[%RU32] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v\n", pVCpu->idCpu)); \
9092 HMVMX_ASSERT_PREEMPT_SAFE(); \
9093 if (VMMR0IsLogFlushDisabled(pVCpu)) \
9094 HMVMX_ASSERT_PREEMPT_CPUID(); \
9095 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
9096 } while (0)
9097
9098# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() \
9099 do { \
9100 Log4Func(("\n")); \
9101 } while (0)
9102#else /* Release builds */
9103# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \
9104 do { \
9105 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
9106 NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient); \
9107 } while (0)
9108# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while (0)
9109#endif
9110
9111
9112/**
9113 * Advances the guest RIP after reading it from the VMCS.
9114 *
9115 * @returns VBox status code.
9116 * @param pVCpu Pointer to the VMCPU.
9117 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
9118 * out-of-sync. Make sure to update the required fields
9119 * before using them.
9120 * @param pVmxTransient Pointer to the VMX transient structure.
9121 *
9122 * @remarks No-long-jump zone!!!
9123 */
9124DECLINLINE(int) hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9125{
9126 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
9127 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
9128 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
9129 AssertRCReturn(rc, rc);
9130
9131 pMixedCtx->rip += pVmxTransient->cbInstr;
9132 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
9133
9134 /*
9135 * Deliver a debug exception to the guest if it is single-stepping. Don't directly inject a #DB but use the
9136 * pending debug exception field as it takes care of priority of events.
9137 *
9138 * See Intel spec. 32.2.1 "Debug Exceptions".
9139 */
9140 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
9141
9142 return rc;
9143}
9144
9145
9146/**
9147 * Tries to determine what part of the guest-state VT-x has deemed as invalid
9148 * and update error record fields accordingly.
9149 *
9150 * @return VMX_IGS_* return codes.
9151 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
9152 * wrong with the guest state.
9153 *
9154 * @param pVM Pointer to the VM.
9155 * @param pVCpu Pointer to the VMCPU.
9156 * @param pCtx Pointer to the guest-CPU state.
9157 *
9158 * @remarks This function assumes our cache of the VMCS controls
9159 * is valid, i.e. hmR0VmxCheckVmcsCtls() succeeded.
9160 */
9161static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
9162{
9163#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
9164#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { \
9165 uError = (err); \
9166 break; \
9167 } else do { } while (0)
9168
9169 int rc;
9170 uint32_t uError = VMX_IGS_ERROR;
9171 uint32_t u32Val;
9172 bool fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuest;
9173
9174 do
9175 {
9176 /*
9177 * CR0.
9178 */
9179 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
9180 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
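 /* Fixed-bit semantics: a bit that is 1 in CR0_FIXED0 must be 1 in guest CR0, and a bit that is 0 in CR0_FIXED1
    must be 0. Hence uSetCR0 is the mask of bits that must be set and uZapCR0 the mask of bits that may be set;
    anything outside uZapCR0 must be zero. */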
9181 /* Exceptions for unrestricted guests for fixed CR0 bits (PE, PG).
9182 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
9183 if (fUnrestrictedGuest)
9184 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
9185
9186 uint32_t u32GuestCR0;
9187 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32GuestCR0);
9188 AssertRCBreak(rc);
9189 HMVMX_CHECK_BREAK((u32GuestCR0 & uSetCR0) == uSetCR0, VMX_IGS_CR0_FIXED1);
9190 HMVMX_CHECK_BREAK(!(u32GuestCR0 & ~uZapCR0), VMX_IGS_CR0_FIXED0);
9191 if ( !fUnrestrictedGuest
9192 && (u32GuestCR0 & X86_CR0_PG)
9193 && !(u32GuestCR0 & X86_CR0_PE))
9194 {
9195 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
9196 }
9197
9198 /*
9199 * CR4.
9200 */
9201 uint64_t uSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
9202 uint64_t uZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
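 /* Same fixed-bit scheme as CR0 above: uSetCR4 = bits that must be 1, uZapCR4 = bits that may be 1. */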
9203
9204 uint32_t u32GuestCR4;
9205 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32GuestCR4);
9206 AssertRCBreak(rc);
9207 HMVMX_CHECK_BREAK((u32GuestCR4 & uSetCR4) == uSetCR4, VMX_IGS_CR4_FIXED1);
9208 HMVMX_CHECK_BREAK(!(u32GuestCR4 & ~uZapCR4), VMX_IGS_CR4_FIXED0);
9209
9210 /*
9211 * IA32_DEBUGCTL MSR.
9212 */
9213 uint64_t u64Val;
9214 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
9215 AssertRCBreak(rc);
9216 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
9217 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
9218 {
9219 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
9220 }
9221 uint64_t u64DebugCtlMsr = u64Val;
9222
9223#ifdef VBOX_STRICT
9224 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
9225 AssertRCBreak(rc);
9226 Assert(u32Val == pVCpu->hm.s.vmx.u32EntryCtls);
9227#endif
9228 bool const fLongModeGuest = RT_BOOL(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST);
9229
9230 /*
9231 * RIP and RFLAGS.
9232 */
9233 uint32_t u32Eflags;
9234#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9235 if (HMVMX_IS_64BIT_HOST_MODE())
9236 {
9237 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64Val);
9238 AssertRCBreak(rc);
9239 /* pCtx->rip can be different from the one in the VMCS (e.g. after running guest code and taking VM-exits that don't update it). */
9240 if ( !fLongModeGuest
9241 || !pCtx->cs.Attr.n.u1Long)
9242 {
9243 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
9244 }
9245 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
9246 * must be identical if the "IA-32e mode guest" VM-entry
9247 * control is 1 and CS.L is 1. No check applies if the
9248 * CPU supports 64 linear-address bits. */
9249
9250 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
9251 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RFLAGS, &u64Val);
9252 AssertRCBreak(rc);
9253 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
9254 VMX_IGS_RFLAGS_RESERVED);
9255 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
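 /* The reserved-bit check above guarantees bits 63:32 are zero, so the narrowing assignment below is safe. */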
9256 u32Eflags = u64Val;
9257 }
9258 else
9259#endif
9260 {
9261 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Eflags);
9262 AssertRCBreak(rc);
9263 HMVMX_CHECK_BREAK(!(u32Eflags & 0xffc08028), VMX_IGS_RFLAGS_RESERVED); /* Bit 31:22, Bit 15, 5, 3 MBZ. */
9264 HMVMX_CHECK_BREAK((u32Eflags & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
9265 }
9266
9267 if ( fLongModeGuest
9268 || ( fUnrestrictedGuest
9269 && !(u32GuestCR0 & X86_CR0_PE)))
9270 {
9271 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
9272 }
9273
9274 uint32_t u32EntryInfo;
9275 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
9276 AssertRCBreak(rc);
9277 if ( VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo)
9278 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
9279 {
9280 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
9281 }
9282
9283 /*
9284 * 64-bit checks.
9285 */
9286#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9287 if (HMVMX_IS_64BIT_HOST_MODE())
9288 {
9289 if ( fLongModeGuest
9290 && !fUnrestrictedGuest)
9291 {
9292 HMVMX_CHECK_BREAK(u32GuestCR0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
9293 HMVMX_CHECK_BREAK(u32GuestCR4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
9294 }
9295
9296 if ( !fLongModeGuest
9297 && (u32GuestCR4 & X86_CR4_PCIDE))
9298 {
9299 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
9300 }
9301
9302 /** @todo CR3 field must be such that bits 63:52 and bits in the range
9303 * 51:32 beyond the processor's physical-address width are 0. */
9304
9305 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
9306 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
9307 {
9308 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
9309 }
9310
9311 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
9312 AssertRCBreak(rc);
9313 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
9314
9315 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
9316 AssertRCBreak(rc);
9317 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
9318 }
9319#endif
9320
9321 /*
9322 * PERF_GLOBAL MSR.
9323 */
9324 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR)
9325 {
9326 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
9327 AssertRCBreak(rc);
9328 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
9329 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
9330 }
9331
9332 /*
9333 * PAT MSR.
9334 */
9335 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR)
9336 {
9337 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
9338 AssertRCBreak(rc);
9339 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
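 /* Each of the 8 PAT entries must hold a valid memory type: UC(0), WC(1), WT(4), WP(5), WB(6) or UC-(7);
    encodings 2 and 3 are reserved. */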
9340 for (unsigned i = 0; i < 8; i++)
9341 {
9342 uint8_t u8Val = (u64Val & 0x7);
9343 if ( u8Val != 0 /* UC */
9344 && u8Val != 1 /* WC */
9345 && u8Val != 4 /* WT */
9346 && u8Val != 5 /* WP */
9347 && u8Val != 6 /* WB */
9348 && u8Val != 7 /* UC- */)
9349 {
9350 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
9351 }
9352 u64Val >>= 3;
9353 }
9354 }
9355
9356 /*
9357 * EFER MSR.
9358 */
9359 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
9360 {
9361 Assert(pVM->hm.s.vmx.fSupportsVmcsEfer);
9362 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
9363 AssertRCBreak(rc);
9364 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
9365 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
9366 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST),
9367 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
9368 HMVMX_CHECK_BREAK( fUnrestrictedGuest
9369 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u32GuestCR0 & X86_CR0_PG),
9370 VMX_IGS_EFER_LMA_PG_MISMATCH);
9371 }
9372
9373 /*
9374 * Segment registers.
9375 */
9376 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9377 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
9378 if (!(u32Eflags & X86_EFL_VM))
9379 {
9380 /* CS */
9381 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
9382 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
9383 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
9384 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
9385 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
9386 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
9387 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
9388 /* CS cannot be loaded with NULL in protected mode. */
9389 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
9390 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
9391 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
9392 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
9393 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
9394 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
9395 else if (pVM->hm.s.vmx.fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
9396 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
9397 else
9398 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
9399
9400 /* SS */
9401 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9402 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
9403 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
9404 if ( !(pCtx->cr0 & X86_CR0_PE)
9405 || pCtx->cs.Attr.n.u4Type == 3)
9406 {
9407 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
9408 }
9409 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
9410 {
9411 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
9412 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
9413 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
9414 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
9415 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
9416 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
9417 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
9418 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
9419 }
9420
9421 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
9422 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
9423 {
9424 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
9425 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
9426 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9427 || pCtx->ds.Attr.n.u4Type > 11
9428 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
9429 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
9430 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
9431 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
9432 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
9433 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
9434 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
9435 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9436 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
9437 }
9438 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
9439 {
9440 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
9441 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
9442 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9443 || pCtx->es.Attr.n.u4Type > 11
9444 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
9445 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
9446 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
9447 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
9448 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
9449 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
9450 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
9451 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9452 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
9453 }
9454 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
9455 {
9456 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
9457 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
9458 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9459 || pCtx->fs.Attr.n.u4Type > 11
9460 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
9461 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
9462 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
9463 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
9464 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
9465 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
9466 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
9467 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9468 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
9469 }
9470 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
9471 {
9472 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
9473 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
9474 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9475 || pCtx->gs.Attr.n.u4Type > 11
9476 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
9477 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
9478 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
9479 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
9480 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
9481 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
9482 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
9483 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9484 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
9485 }
9486 /* 64-bit capable CPUs. */
9487#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9488 if (HMVMX_IS_64BIT_HOST_MODE())
9489 {
9490 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
9491 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
9492 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9493 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
9494 HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
9495 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
9496 VMX_IGS_LONGMODE_SS_BASE_INVALID);
9497 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
9498 VMX_IGS_LONGMODE_DS_BASE_INVALID);
9499 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
9500 VMX_IGS_LONGMODE_ES_BASE_INVALID);
9501 }
9502#endif
9503 }
9504 else
9505 {
9506 /* V86 mode checks. */
9507 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
9508 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
9509 {
9510 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
9511 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
9512 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
9513 }
9514 else
9515 {
9516 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
9517 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
9518 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
9519 }
9520
9521 /* CS */
9522 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
9523 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
9524 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
9525 /* SS */
9526 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
9527 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
9528 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
9529 /* DS */
9530 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
9531 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
9532 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
9533 /* ES */
9534 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
9535 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
9536 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
9537 /* FS */
9538 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
9539 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
9540 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
9541 /* GS */
9542 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
9543 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
9544 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
9545 /* 64-bit capable CPUs. */
9546#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9547 if (HMVMX_IS_64BIT_HOST_MODE())
9548 {
9549 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
9550 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
9551 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9552 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
9553 HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
9554 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
9555 VMX_IGS_LONGMODE_SS_BASE_INVALID);
9556 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
9557 VMX_IGS_LONGMODE_DS_BASE_INVALID);
9558 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
9559 VMX_IGS_LONGMODE_ES_BASE_INVALID);
9560 }
9561#endif
9562 }
9563
9564 /*
9565 * TR.
9566 */
9567 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
9568 /* 64-bit capable CPUs. */
9569#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9570 if (HMVMX_IS_64BIT_HOST_MODE())
9571 {
9572 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
9573 }
9574#endif
9575 if (fLongModeGuest)
9576 {
9577 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
9578 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
9579 }
9580 else
9581 {
9582 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
9583 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
9584 VMX_IGS_TR_ATTR_TYPE_INVALID);
9585 }
9586 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
9587 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
9588 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
9589 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
9590 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
9591 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
9592 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
9593 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
9594
9595 /*
9596 * GDTR and IDTR.
9597 */
9598#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9599 if (HMVMX_IS_64BIT_HOST_MODE())
9600 {
9601 rc = VMXReadVmcs64(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
9602 AssertRCBreak(rc);
9603 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
9604
9605 rc = VMXReadVmcs64(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
9606 AssertRCBreak(rc);
9607 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
9608 }
9609#endif
9610
9611 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
9612 AssertRCBreak(rc);
9613 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
9614
9615 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
9616 AssertRCBreak(rc);
9617 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
9618
9619 /*
9620 * Guest Non-Register State.
9621 */
9622 /* Activity State. */
9623 uint32_t u32ActivityState;
9624 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
9625 AssertRCBreak(rc);
9626 HMVMX_CHECK_BREAK( !u32ActivityState
9627 || (u32ActivityState & MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hm.s.vmx.Msrs.u64Misc)),
9628 VMX_IGS_ACTIVITY_STATE_INVALID);
9629 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
9630 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
9631 uint32_t u32IntrState;
9632 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &u32IntrState);
9633 AssertRCBreak(rc);
9634 if ( u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS
9635 || u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
9636 {
9637 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
9638 }
9639
9640 /** @todo Activity state and injecting interrupts. Left as a todo since we
9641 * currently don't use any activity state other than ACTIVE. */
9642
9643 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
9644 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
9645
9646 /* Guest interruptibility-state. */
9647 HMVMX_CHECK_BREAK(!(u32IntrState & 0xfffffff0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
9648 HMVMX_CHECK_BREAK((u32IntrState & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
9649 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS))
9650 != ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
9651 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
9652 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
9653 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
9654 || !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI),
9655 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
9656 if (VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo))
9657 {
9658 if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
9659 {
9660 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
9661 && !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
9662 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
9663 }
9664 else if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
9665 {
9666 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
9667 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
9668 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI),
9669 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
9670 }
9671 }
9672 /** @todo Assumes the processor is not in SMM. */
9673 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI),
9674 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
9675 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
9676 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI),
9677 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
9678 if ( (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
9679 && VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo)
9680 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
9681 {
9682 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI),
9683 VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
9684 }
9685
9686 /* Pending debug exceptions. */
9687 if (HMVMX_IS_64BIT_HOST_MODE())
9688 {
9689 rc = VMXReadVmcs64(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u64Val);
9690 AssertRCBreak(rc);
9691 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
9692 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
9693 u32Val = u64Val; /* For pending debug exceptions checks below. */
9694 }
9695 else
9696 {
9697 rc = VMXReadVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u32Val);
9698 AssertRCBreak(rc);
9699 /* Bits 31:15, Bit 13, Bits 11:4 MBZ. */
9700 HMVMX_CHECK_BREAK(!(u32Val & 0xffffaff0), VMX_IGS_PENDING_DEBUG_RESERVED);
9701 }
9702
9703 if ( (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
9704 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS)
9705 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
9706 {
9707 if ( (u32Eflags & X86_EFL_TF)
9708 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
9709 {
9710 /* Bit 14 is PendingDebug.BS. */
9711 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
9712 }
9713 if ( !(u32Eflags & X86_EFL_TF)
9714 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
9715 {
9716 /* Bit 14 is PendingDebug.BS. */
9717 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
9718 }
9719 }
9720
9721 /* VMCS link pointer. */
9722 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
9723 AssertRCBreak(rc);
9724 if (u64Val != UINT64_C(0xffffffffffffffff))
9725 {
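 /* A link pointer of all ones means no separate VMCS is referenced; the checks below apply only when it
    points at a real VMCS. */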
9726 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
9727 /** @todo Bits beyond the processor's physical-address width MBZ. */
9728 /** @todo The 32 bits located in memory referenced by the value of this field (as a
9729 * physical address) must contain the processor's VMCS revision ID. */
9730 /** @todo SMM checks. */
9731 }
9732
9733 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
9734 * not using Nested Paging? */
9735 if ( pVM->hm.s.fNestedPaging
9736 && !fLongModeGuest
9737 && CPUMIsGuestInPAEModeEx(pCtx))
9738 {
9739 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
9740 AssertRCBreak(rc);
9741 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
9742
9743 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
9744 AssertRCBreak(rc);
9745 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
9746
9747 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
9748 AssertRCBreak(rc);
9749 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
9750
9751 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
9752 AssertRCBreak(rc);
9753 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
9754 }
9755
9756 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
9757 if (uError == VMX_IGS_ERROR)
9758 uError = VMX_IGS_REASON_NOT_FOUND;
9759 } while (0);
9760
9761 pVCpu->hm.s.u32HMError = uError;
9762 return uError;
9763
9764#undef HMVMX_ERROR_BREAK
9765#undef HMVMX_CHECK_BREAK
9766}
9767
9768/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9769/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
9770/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9771
9772/** @name VM-exit handlers.
9773 * @{
9774 */
9775
9776/**
9777 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
9778 */
9779HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9780{
9781 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9782 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
9783 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
9784 if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
9785 return VINF_SUCCESS;
9786 return VINF_EM_RAW_INTERRUPT;
9787}
9788
9789
9790/**
9791 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9792 */
9793HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9794{
9795 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9796 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
9797
9798 int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
9799 AssertRCReturn(rc, rc);
9800
9801 uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo);
9802 Assert( !(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT)
9803 && uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
9804 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
9805
9806 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
9807 {
9808 /*
9809 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we injected it ourselves and
9810 * anything we inject is not going to cause a VM-exit directly for the event being injected.
9811 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
9812 *
9813 * Dispatch the NMI to the host. See Intel spec. 27.5.5 "Updating Non-Register State".
9814 */
9815 VMXDispatchHostNmi();
9816 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
9817 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
9818 return VINF_SUCCESS;
9819 }
9820
9821 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
9822 rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
9823 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
9824 {
9825 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
9826 return VINF_SUCCESS;
9827 }
9828 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
9829 {
9830 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
9831 return rc;
9832 }
9833
9834 uint32_t uExitIntInfo = pVmxTransient->uExitIntInfo;
9835 uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntInfo);
9836 switch (uIntType)
9837 {
9838 case VMX_EXIT_INTERRUPTION_INFO_TYPE_PRIV_SW_XCPT: /* Privileged software exception. (#DB from ICEBP) */
9839 Assert(uVector == X86_XCPT_DB);
9840 /* no break */
9841 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT: /* Software exception. (#BP or #OF) */
9842 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_PRIV_SW_XCPT);
9843 /* no break */
9844 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT:
9845 {
9846 switch (uVector)
9847 {
9848 case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pMixedCtx, pVmxTransient); break;
9849 case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pMixedCtx, pVmxTransient); break;
9850 case X86_XCPT_NM: rc = hmR0VmxExitXcptNM(pVCpu, pMixedCtx, pVmxTransient); break;
9851 case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pMixedCtx, pVmxTransient); break;
9852 case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pMixedCtx, pVmxTransient); break;
9853 case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pMixedCtx, pVmxTransient); break;
9854#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
9855 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
9856 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
9857 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
9858 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
9859 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
9860 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
9861 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
9862 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
9863 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
9864 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
9865 case X86_XCPT_TS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestTS);
9866 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
9867#endif
9868 default:
9869 {
9870 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
9871 AssertRCReturn(rc, rc);
9872
9873 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
9874 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
9875 {
9876 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
9877 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
9878 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
9879
9880 rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
9881 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
9882 AssertRCReturn(rc, rc);
9883 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntInfo),
9884 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode,
9885 0 /* GCPtrFaultAddress */);
9886 AssertRCReturn(rc, rc);
9887 }
9888 else
9889 {
9890 AssertMsgFailed(("Unexpected VM-exit caused by exception %#x\n", uVector));
9891 pVCpu->hm.s.u32HMError = uVector;
9892 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
9893 }
9894 break;
9895 }
9896 }
9897 break;
9898 }
9899
9900 default:
9901 {
9902 pVCpu->hm.s.u32HMError = uExitIntInfo;
9903 rc = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
9904 AssertMsgFailed(("Unexpected interruption info %#x\n", VMX_EXIT_INTERRUPTION_INFO_TYPE(uExitIntInfo)));
9905 break;
9906 }
9907 }
9908 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
9909 return rc;
9910}
9911
9912
9913/**
9914 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
9915 */
9916HMVMX_EXIT_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9917{
9918 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9919
9920 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, it is now ready. */
9921 hmR0VmxClearIntWindowExitVmcs(pVCpu);
9922
9923 /* Deliver the pending interrupts via hmR0VmxEvaluatePendingEvent() and resume guest execution. */
9924 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
9925 return VINF_SUCCESS;
9926}
9927
9928
9929/**
9930 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
9931 */
9932HMVMX_EXIT_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9933{
9934 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9935 if (RT_UNLIKELY(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT)))
9936 {
9937 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
9938 HMVMX_RETURN_UNEXPECTED_EXIT();
9939 }
9940
9941 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS));
9942
9943 /*
9944 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
9945 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
9946 */
9947 uint32_t uIntrState = 0;
9948 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
9949 AssertRCReturn(rc, rc);
9950
9951 bool const fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
9952 if ( fBlockSti
9953 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
9954 {
9955 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
9956 }
9957
9958 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
9959 hmR0VmxClearNmiWindowExitVmcs(pVCpu);
9960
9961 /* Deliver the pending NMI via hmR0VmxEvaluatePendingEvent() and resume guest execution. */
9962 return VINF_SUCCESS;
9963}
9964
9965
9966/**
9967 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
9968 */
9969HMVMX_EXIT_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9970{
9971 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9972 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
9973 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
9974}
9975
9976
9977/**
9978 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
9979 */
9980HMVMX_EXIT_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9981{
9982 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9983 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
9984 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
9985}
9986
9987
9988/**
9989 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
9990 */
9991HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9992{
9993 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9994 PVM pVM = pVCpu->CTX_SUFF(pVM);
9995 int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
9996 if (RT_LIKELY(rc == VINF_SUCCESS))
9997 {
9998 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
9999 Assert(pVmxTransient->cbInstr == 2);
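        /* (CPUID is a 2-byte opcode, 0F A2, hence the instruction-length assertion above.) */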
10000 }
10001 else
10002 {
10003 AssertMsgFailed(("hmR0VmxExitCpuid: EMInterpretCpuId failed with %Rrc\n", rc));
10004 rc = VERR_EM_INTERPRETER;
10005 }
10006 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
10007 return rc;
10008}
10009
10010
10011/**
10012 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
10013 */
10014HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10015{
10016 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10017 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
10018 AssertRCReturn(rc, rc);
10019
10020 if (pMixedCtx->cr4 & X86_CR4_SMXE)
10021 return VINF_EM_RAW_EMULATE_INSTR;
10022
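    /* (When CR4.SMXE is clear, executing GETSEC raises #UD in the guest and that fault takes priority over
        the VM-exit, so reaching this point with SMXE clear indicates something is broken.) */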
10023 AssertMsgFailed(("hmR0VmxExitGetsec: unexpected VM-exit when CR4.SMXE is 0.\n"));
10024 HMVMX_RETURN_UNEXPECTED_EXIT();
10025}
10026
10027
10028/**
10029 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10030 */
10031HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10032{
10033 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10034 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
10035 AssertRCReturn(rc, rc);
10036
10037 PVM pVM = pVCpu->CTX_SUFF(pVM);
10038 rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10039 if (RT_LIKELY(rc == VINF_SUCCESS))
10040 {
10041 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10042 Assert(pVmxTransient->cbInstr == 2);
10043 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
10044 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
10045 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
10046 }
10047 else
10048 {
10049 AssertMsgFailed(("hmR0VmxExitRdtsc: EMInterpretRdtsc failed with %Rrc\n", rc));
10050 rc = VERR_EM_INTERPRETER;
10051 }
10052 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
10053 return rc;
10054}
10055
10056
10057/**
10058 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10059 */
10060HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10061{
10062 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10063 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
10064 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); /* For MSR_K8_TSC_AUX */
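    /* (RDTSCP returns the TSC in EDX:EAX and the IA32_TSC_AUX MSR value in ECX, which is why the
        auto-load/store MSRs are synced here as well.) */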
10065 AssertRCReturn(rc, rc);
10066
10067 PVM pVM = pVCpu->CTX_SUFF(pVM);
10068 rc = EMInterpretRdtscp(pVM, pVCpu, pMixedCtx);
10069 if (RT_LIKELY(rc == VINF_SUCCESS))
10070 {
10071 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10072 Assert(pVmxTransient->cbInstr == 3);
10073 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
10074 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
10075 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
10076 }
10077 else
10078 {
10079 AssertMsgFailed(("hmR0VmxExitRdtscp: EMInterpretRdtscp failed with %Rrc\n", rc));
10080 rc = VERR_EM_INTERPRETER;
10081 }
10082 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
10083 return rc;
10084}
10085
10086
10087/**
10088 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10089 */
10090HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10091{
10092 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10093 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
10094 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); /** @todo review if CR0 is really required by EM. */
10095 AssertRCReturn(rc, rc);
10096
10097 PVM pVM = pVCpu->CTX_SUFF(pVM);
10098 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10099 if (RT_LIKELY(rc == VINF_SUCCESS))
10100 {
10101 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10102 Assert(pVmxTransient->cbInstr == 2);
10103 }
10104 else
10105 {
10106 AssertMsgFailed(("hmR0VmxExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
10107 rc = VERR_EM_INTERPRETER;
10108 }
10109 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
10110 return rc;
10111}
10112
10113
10114/**
10115 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
10116 */
10117HMVMX_EXIT_DECL hmR0VmxExitVmcall(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10118{
10119 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10120
10121 int rc = VERR_NOT_SUPPORTED;
10122 if (GIMAreHypercallsEnabled(pVCpu))
10123 {
10124 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10125 AssertRCReturn(rc, rc);
10126
10127 rc = GIMHypercall(pVCpu, pMixedCtx);
10128 }
10129 if (rc != VINF_SUCCESS)
10130 {
10131 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
10132 rc = VINF_SUCCESS;
10133 }
10134
10135 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmcall);
10136 return rc;
10137}
10138
10139
10140/**
10141 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10142 */
10143HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10144{
10145 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10146 PVM pVM = pVCpu->CTX_SUFF(pVM);
10147 Assert(!pVM->hm.s.fNestedPaging);
10148
10149 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
10150 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
10151 AssertRCReturn(rc, rc);
10152
10153 VBOXSTRICTRC rc2 = EMInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), pVmxTransient->uExitQualification);
10154 rc = VBOXSTRICTRC_VAL(rc2);
10155 if (RT_LIKELY(rc == VINF_SUCCESS))
10156 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10157 else
10158 {
10159 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %#RX64 failed with %Rrc\n",
10160 pVmxTransient->uExitQualification, rc));
10161 }
10162 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
10163 return rc;
10164}
10165
10166
10167/**
10168 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10169 */
10170HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10171{
10172 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10173 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10174 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10175 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10176 AssertRCReturn(rc, rc);
10177
10178 PVM pVM = pVCpu->CTX_SUFF(pVM);
10179 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10180 if (RT_LIKELY(rc == VINF_SUCCESS))
10181 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10182 else
10183 {
10184 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
10185 rc = VERR_EM_INTERPRETER;
10186 }
10187 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
10188 return rc;
10189}
10190
10191
10192/**
10193 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10194 */
10195HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10196{
10197 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10198 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10199 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10200 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10201 AssertRCReturn(rc, rc);
10202
10203 PVM pVM = pVCpu->CTX_SUFF(pVM);
10204 VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10205 rc = VBOXSTRICTRC_VAL(rc2);
10206 if (RT_LIKELY( rc == VINF_SUCCESS
10207 || rc == VINF_EM_HALT))
10208 {
10209 int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10210 AssertRCReturn(rc3, rc3);
10211
10212 if ( rc == VINF_EM_HALT
10213 && EMMonitorWaitShouldContinue(pVCpu, pMixedCtx))
10214 {
10215 rc = VINF_SUCCESS;
10216 }
10217 }
10218 else
10219 {
10220 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
10221 rc = VERR_EM_INTERPRETER;
10222 }
10223 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
10224 ("hmR0VmxExitMwait: failed, invalid error code %Rrc\n", rc));
10225 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
10226 return rc;
10227}
10228
10229
10230/**
10231 * VM-exit handler for RSM (VMX_EXIT_RSM). Unconditional VM-exit.
10232 */
10233HMVMX_EXIT_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10234{
10235 /*
10236 * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root mode. In theory, we should never
10237 * get this VM-exit. This can happen only if dual-monitor treatment of SMI and VMX is enabled, which can (only?) be done by
10238 * executing VMCALL in VMX root operation. If we get here, something funny is going on.
10239 * See Intel spec. "33.15.5 Enabling the Dual-Monitor Treatment".
10240 */
10241 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10242 AssertMsgFailed(("Unexpected RSM VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10243 HMVMX_RETURN_UNEXPECTED_EXIT();
10244}
10245
10246
10247/**
10248 * VM-exit handler for SMI (VMX_EXIT_SMI). Unconditional VM-exit.
10249 */
10250HMVMX_EXIT_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10251{
10252 /*
10253 * This can only happen if we support dual-monitor treatment of SMI, which can be activated by executing VMCALL in VMX
10254 * root operation. Only an STM (SMM transfer monitor) would get this exit when we (the executive monitor) execute a VMCALL
10255 * in VMX root mode or receive an SMI. If we get here, something funny is going on.
10256     * See Intel spec. "33.15.6 Activating the Dual-Monitor Treatment" and Intel spec. 25.3 "Other Causes of VM-Exits".
10257 */
10258 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10259 AssertMsgFailed(("Unexpected SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10260 HMVMX_RETURN_UNEXPECTED_EXIT();
10261}
10262
10263
10264/**
10265 * VM-exit handler for IO SMI (VMX_EXIT_IO_SMI). Unconditional VM-exit.
10266 */
10267HMVMX_EXIT_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10268{
10269 /* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */
10270 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10271 AssertMsgFailed(("Unexpected IO SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10272 HMVMX_RETURN_UNEXPECTED_EXIT();
10273}
10274
10275
10276/**
10277 * VM-exit handler for SIPI (VMX_EXIT_SIPI). Conditional VM-exit.
10278 */
10279HMVMX_EXIT_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10280{
10281 /*
10282 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used. We currently
10283 * don't make use of it (see hmR0VmxLoadGuestActivityState()) as our guests don't have direct access to the host LAPIC.
10284 * See Intel spec. 25.3 "Other Causes of VM-exits".
10285 */
10286 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10287 AssertMsgFailed(("Unexpected SIPI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10288 HMVMX_RETURN_UNEXPECTED_EXIT();
10289}
10290
10291
10292/**
10293 * VM-exit handler for INIT signal (VMX_EXIT_INIT_SIGNAL). Unconditional
10294 * VM-exit.
10295 */
10296HMVMX_EXIT_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10297{
10298 /*
10299 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
10300     * See Intel spec. "33.14.1 Default Treatment of SMI Delivery" and Intel spec. 29.3 "VMX Instructions" for "VMXON".
10301 *
10302 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these VM-exits.
10303 * See Intel spec. "23.8 Restrictions on VMX operation".
10304 */
10305 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10306 return VINF_SUCCESS;
10307}
10308
10309
10310/**
10311 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
10312 * VM-exit.
10313 */
10314HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10315{
10316 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10317 return VINF_EM_RESET;
10318}
10319
10320
10321/**
10322 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10323 */
10324HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10325{
10326 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10327 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
10328 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
10329 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10330 AssertRCReturn(rc, rc);
10331
10332 pMixedCtx->rip++;
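    /* (HLT is a single-byte opcode, 0xF4, so advancing RIP by one is equivalent to adding the VM-exit
        instruction length here.) */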
10333 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
10334 if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx)) /* Requires eflags. */
10335 rc = VINF_SUCCESS;
10336 else
10337 rc = VINF_EM_HALT;
10338
10339 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
10340 return rc;
10341}
10342
10343
10344/**
10345 * VM-exit handler for instructions that result in a #UD exception delivered to
10346 * the guest.
10347 */
10348HMVMX_EXIT_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10349{
10350 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10351 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
10352 return VINF_SUCCESS;
10353}
10354
10355
10356/**
10357 * VM-exit handler for expiry of the VMX preemption timer.
10358 */
10359HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10360{
10361 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10362
10363 /* If the preemption-timer has expired, reinitialize the preemption timer on next VM-entry. */
10364 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
10365
10366 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
10367 PVM pVM = pVCpu->CTX_SUFF(pVM);
10368 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
10369 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
10370 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
10371}
10372
10373
10374/**
10375 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
10376 */
10377HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10378{
10379 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10380
10381    /* We expose XSETBV to the guest; fall back to the recompiler for emulation. */
10382 /** @todo check if XSETBV is supported by the recompiler. */
10383 return VERR_EM_INTERPRETER;
10384}
10385
10386
10387/**
10388 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10389 */
10390HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10391{
10392 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10393
10394    /* The guest should not invalidate the host CPU's TLBs; fall back to the recompiler. */
10395 /** @todo implement EMInterpretInvpcid() */
10396 return VERR_EM_INTERPRETER;
10397}
10398
10399
10400/**
10401 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE).
10402 * Error VM-exit.
10403 */
10404HMVMX_EXIT_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10405{
10406 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10407 AssertRCReturn(rc, rc);
10408
10409 rc = hmR0VmxCheckVmcsCtls(pVCpu);
10410 AssertRCReturn(rc, rc);
10411
10412 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
10413 NOREF(uInvalidReason);
10414
10415#ifdef VBOX_STRICT
10416 uint32_t uIntrState;
10417 HMVMXHCUINTREG uHCReg;
10418 uint64_t u64Val;
10419 uint32_t u32Val;
10420
10421 rc = hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
10422 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
10423 rc |= hmR0VmxReadEntryInstrLenVmcs(pVmxTransient);
10424 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
10425 AssertRCReturn(rc, rc);
10426
10427 Log4(("uInvalidReason %u\n", uInvalidReason));
10428 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
10429 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
10430 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
10431 Log4(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", uIntrState));
10432
10433 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); AssertRC(rc);
10434 Log4(("VMX_VMCS_GUEST_CR0 %#RX32\n", u32Val));
10435 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
10436 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
10437 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
10438    Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RHr\n", uHCReg));
10439 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
10440 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
10441 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
10442 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
10443 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
10444 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
10445#else
10446 NOREF(pVmxTransient);
10447#endif
10448
10449 HMDumpRegs(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
10450 return VERR_VMX_INVALID_GUEST_STATE;
10451}
10452
10453
10454/**
10455 * VM-exit handler for VM-entry failure due to an MSR-load
10456 * (VMX_EXIT_ERR_MSR_LOAD). Error VM-exit.
10457 */
10458HMVMX_EXIT_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10459{
10460 NOREF(pVmxTransient);
10461 AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx);
10462 HMVMX_RETURN_UNEXPECTED_EXIT();
10463}
10464
10465
10466/**
10467 * VM-exit handler for VM-entry failure due to a machine-check event
10468 * (VMX_EXIT_ERR_MACHINE_CHECK). Error VM-exit.
10469 */
10470HMVMX_EXIT_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10471{
10472 NOREF(pVmxTransient);
10473 AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx);
10474 HMVMX_RETURN_UNEXPECTED_EXIT();
10475}
10476
10477
10478/**
10479 * VM-exit handler for all undefined reasons. Should never ever happen... in
10480 * theory.
10481 */
10482HMVMX_EXIT_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10483{
10484 AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason, pVCpu, pMixedCtx));
10485 NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient);
10486 return VERR_VMX_UNDEFINED_EXIT_CODE;
10487}
10488
10489
10490/**
10491 * VM-exit handler for XDTR (LGDT, SGDT, LIDT, SIDT) accesses
10492 * (VMX_EXIT_XDTR_ACCESS) and LDT and TR access (LLDT, LTR, SLDT, STR).
10493 * Conditional VM-exit.
10494 */
10495HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10496{
10497 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10498
10499 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT. */
10500 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess);
10501 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT)
10502 return VERR_EM_INTERPRETER;
10503 AssertMsgFailed(("Unexpected XDTR access. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10504 HMVMX_RETURN_UNEXPECTED_EXIT();
10505}
10506
10507
10508/**
10509 * VM-exit handler for RDRAND (VMX_EXIT_RDRAND). Conditional VM-exit.
10510 */
10511HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10512{
10513 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10514
10515 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT. */
10516 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdrand);
10517 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT)
10518 return VERR_EM_INTERPRETER;
10519 AssertMsgFailed(("Unexpected RDRAND exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10520 HMVMX_RETURN_UNEXPECTED_EXIT();
10521}
10522
10523
10524/**
10525 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10526 */
10527HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10528{
10529 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10530
10531    /* EMInterpretRdmsr() requires CR0, EFLAGS and the SS segment register. */
10532 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10533 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10534 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10535 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
10536 {
10537 rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
10538 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
10539 }
10540 AssertRCReturn(rc, rc);
10541 Log4(("CS:RIP=%04x:%#RX64 ECX=%X\n", pMixedCtx->cs.Sel, pMixedCtx->rip, pMixedCtx->ecx));
10542
10543#ifdef VBOX_STRICT
10544 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
10545 {
10546 if ( hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx)
10547 && pMixedCtx->ecx != MSR_K6_EFER)
10548 {
10549 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
10550 HMVMX_RETURN_UNEXPECTED_EXIT();
10551 }
10552# if HC_ARCH_BITS == 64
10553 if ( pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests
10554 && hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
10555 {
10556 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
10557 HMVMX_RETURN_UNEXPECTED_EXIT();
10558 }
10559# endif
10560 }
10561#endif
10562
10563 PVM pVM = pVCpu->CTX_SUFF(pVM);
10564 rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10565 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER,
10566 ("hmR0VmxExitRdmsr: failed, invalid error code %Rrc\n", rc));
10567 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
10568 if (RT_LIKELY(rc == VINF_SUCCESS))
10569 {
10570 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10571 Assert(pVmxTransient->cbInstr == 2);
10572 }
10573 return rc;
10574}
10575
10576
10577/**
10578 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10579 */
10580HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10581{
10582 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10583 PVM pVM = pVCpu->CTX_SUFF(pVM);
10584 int rc = VINF_SUCCESS;
10585
10586    /* EMInterpretWrmsr() requires CR0, EFLAGS and the SS segment register. */
10587 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10588 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10589 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10590 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
10591 {
10592 rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
10593 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
10594 }
10595 AssertRCReturn(rc, rc);
10596 Log4(("ecx=%#RX32\n", pMixedCtx->ecx));
10597
10598 rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10599 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0VmxExitWrmsr: failed, invalid error code %Rrc\n", rc));
10600 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
10601
10602 if (RT_LIKELY(rc == VINF_SUCCESS))
10603 {
10604 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10605
10606 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
10607 if ( pMixedCtx->ecx >= MSR_IA32_X2APIC_START
10608 && pMixedCtx->ecx <= MSR_IA32_X2APIC_END)
10609 {
10610            /* We've already saved the APIC related guest-state (TPR) in hmR0VmxPostRunGuest(). When full APIC register
10611             * virtualization is implemented we'll have to make sure APIC state is saved from the VMCS before
10612             * EMInterpretWrmsr() changes it. */
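            /* (The x2APIC MSRs occupy the architecturally reserved MSR range 0x800..0x8ff; e.g. MSR 0x808
                is the TPR. See the Intel spec. on the x2APIC register address space.) */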
10613 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
10614 }
10615 else if (pMixedCtx->ecx == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
10616 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
10617 else if (pMixedCtx->ecx == MSR_K6_EFER)
10618 {
10619 /*
10620 * If the guest touches EFER we need to update the VM-Entry and VM-Exit controls as well,
10621 * even if it is -not- touching bits that cause paging mode changes (LMA/LME). We care about
10622 * the other bits as well, SCE and NXE. See @bugref{7368}.
10623 */
10624 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_CTLS | HM_CHANGED_VMX_EXIT_CTLS);
10625 }
10626
10627 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not supported. */
10628 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
10629 {
10630 switch (pMixedCtx->ecx)
10631 {
10632 case MSR_IA32_SYSENTER_CS: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
10633 case MSR_IA32_SYSENTER_EIP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
10634 case MSR_IA32_SYSENTER_ESP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
10635 case MSR_K8_FS_BASE: /* no break */
10636 case MSR_K8_GS_BASE: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS); break;
10637 case MSR_K6_EFER: /* already handled above */ break;
10638 default:
10639 {
10640 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
10641 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
10642#if HC_ARCH_BITS == 64
10643 else if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
10644 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
10645#endif
10646 break;
10647 }
10648 }
10649 }
10650#ifdef VBOX_STRICT
10651 else
10652 {
10653 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
10654 switch (pMixedCtx->ecx)
10655 {
10656 case MSR_IA32_SYSENTER_CS:
10657 case MSR_IA32_SYSENTER_EIP:
10658 case MSR_IA32_SYSENTER_ESP:
10659 case MSR_K8_FS_BASE:
10660 case MSR_K8_GS_BASE:
10661 {
10662 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
10663 HMVMX_RETURN_UNEXPECTED_EXIT();
10664 }
10665
10666                /* Writes to MSRs in the auto-load/store area or to lazily swapped MSRs shouldn't cause VM-exits when MSR-bitmaps are used. */
10667 default:
10668 {
10669 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
10670 {
10671 /* EFER writes are always intercepted, see hmR0VmxLoadGuestMsrs(). */
10672 if (pMixedCtx->ecx != MSR_K6_EFER)
10673 {
10674 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
10675 pMixedCtx->ecx));
10676 HMVMX_RETURN_UNEXPECTED_EXIT();
10677 }
10678 }
10679
10680#if HC_ARCH_BITS == 64
10681 if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
10682 {
10683 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
10684 HMVMX_RETURN_UNEXPECTED_EXIT();
10685 }
10686#endif
10687 break;
10688 }
10689 }
10690 }
10691#endif /* VBOX_STRICT */
10692 }
10693 return rc;
10694}
10695
10696
10697/**
10698 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10699 */
10700HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10701{
10702 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10703
10704 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT. */
10705 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);
10706 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT)
10707 return VERR_EM_INTERPRETER;
10708 AssertMsgFailed(("Unexpected PAUSE exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10709 HMVMX_RETURN_UNEXPECTED_EXIT();
10710}
10711
10712
10713/**
10714 * VM-exit handler for when the TPR value is lowered below the specified
10715 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10716 */
10717HMVMX_EXIT_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10718{
10719 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10720 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW);
10721
10722 /*
10723     * The TPR has already been updated, see hmR0VmxPostRunGuest(). RIP is also updated as part of the VM-exit by VT-x. Update
10724 * the threshold in the VMCS, deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectPendingEvent() and
10725 * resume guest execution.
10726 */
10727 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
10728 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
10729 return VINF_SUCCESS;
10730}
10731
10732
10733/**
10734 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
10735 * VM-exit.
10736 *
10737 * @retval VINF_SUCCESS when guest execution can continue.
10738 * @retval VINF_PGM_CHANGE_MODE when shadow paging mode changed, back to ring-3.
10739 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
10740 * @retval VERR_EM_INTERPRETER when something unexpected happened, fallback to
10741 * recompiler.
10742 */
10743HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10744{
10745 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10746 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
10747 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
10748 AssertRCReturn(rc, rc);
10749
10750 RTGCUINTPTR const uExitQualification = pVmxTransient->uExitQualification;
10751 uint32_t const uAccessType = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification);
10752 PVM pVM = pVCpu->CTX_SUFF(pVM);
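    /* (Exit-qualification layout for control-register accesses, per the Intel spec.: bits 3:0 hold the
        control-register number, bits 5:4 the access type (0=MOV to CR, 1=MOV from CR, 2=CLTS, 3=LMSW),
        bits 11:8 the general-purpose register and bits 31:16 the LMSW source data.) */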
10753 switch (uAccessType)
10754 {
10755 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE: /* MOV to CRx */
10756 {
10757#if 0
10758 /* EMInterpretCRxWrite() references a lot of guest state (EFER, RFLAGS, Segment Registers, etc.) Sync entire state */
10759 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10760#else
10761 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
10762 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
10763 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10764#endif
10765 AssertRCReturn(rc, rc);
10766
10767 rc = EMInterpretCRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
10768 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification),
10769 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification));
10770 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
10771
10772 switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification))
10773 {
10774 case 0: /* CR0 */
10775 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
10776 Log4(("CRX CR0 write rc=%d CR0=%#RX64\n", rc, pMixedCtx->cr0));
10777 break;
10778 case 2: /* CR2 */
10779                    /* Nothing to do here: CR2 is not part of the VMCS. */
10780 break;
10781 case 3: /* CR3 */
10782 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx));
10783 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR3);
10784 Log4(("CRX CR3 write rc=%d CR3=%#RX64\n", rc, pMixedCtx->cr3));
10785 break;
10786 case 4: /* CR4 */
10787 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
10788 Log4(("CRX CR4 write rc=%d CR4=%#RX64\n", rc, pMixedCtx->cr4));
10789 break;
10790 case 8: /* CR8 */
10791 Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
10792 /* CR8 contains the APIC TPR. Was updated by EMInterpretCRxWrite(). */
10793 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
10794 break;
10795 default:
10796 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)));
10797 break;
10798 }
10799
10800 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
10801 break;
10802 }
10803
10804 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ: /* MOV from CRx */
10805 {
10806 /* EMInterpretCRxRead() requires EFER MSR, CS. */
10807 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10808 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
10809 AssertRCReturn(rc, rc);
10810 Assert( !pVM->hm.s.fNestedPaging
10811 || !CPUMIsGuestPagingEnabledEx(pMixedCtx)
10812 || VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 3);
10813
10814 /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
10815 Assert( VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 8
10816 || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
10817
10818 rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
10819 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification),
10820 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification));
10821 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
10822 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
10823 Log4(("CRX CR%d Read access rc=%d\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification), rc));
10824 break;
10825 }
10826
10827 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */
10828 {
10829 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10830 AssertRCReturn(rc, rc);
10831 rc = EMInterpretCLTS(pVM, pVCpu);
10832 AssertRCReturn(rc, rc);
10833 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
10834 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
10835 Log4(("CRX CLTS write rc=%d\n", rc));
10836 break;
10837 }
10838
10839 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10840 {
10841 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10842 AssertRCReturn(rc, rc);
10843 rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
10844 if (RT_LIKELY(rc == VINF_SUCCESS))
10845 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
10846 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
10847 Log4(("CRX LMSW write rc=%d\n", rc));
10848 break;
10849 }
10850
10851 default:
10852 {
10853 AssertMsgFailed(("Invalid access-type in Mov CRx exit qualification %#x\n", uAccessType));
10854 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
10855 }
10856 }
10857
10858 /* Validate possible error codes. */
10859 Assert(rc == VINF_SUCCESS || rc == VINF_PGM_CHANGE_MODE || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_SYNC_CR3
10860 || rc == VERR_VMX_UNEXPECTED_EXCEPTION);
10861 if (RT_SUCCESS(rc))
10862 {
10863 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10864 AssertRCReturn(rc2, rc2);
10865 }
10866
10867 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
10868 return rc;
10869}
10870
10871
10872/**
10873 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
10874 * VM-exit.
10875 */
10876HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10877{
10878 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10879 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
10880
10881 int rc2 = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
10882 rc2 |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
10883 rc2 |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
10884 rc2 |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* Eflag checks in EMInterpretDisasCurrent(). */
10885 rc2 |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
10886 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* SELM checks in EMInterpretDisasCurrent(). */
10887 /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
10888 AssertRCReturn(rc2, rc2);
10889
10890    /* Refer to Intel spec. Table 27-5 "Exit Qualifications for I/O Instructions" for the format. */
10891 uint32_t uIOPort = VMX_EXIT_QUALIFICATION_IO_PORT(pVmxTransient->uExitQualification);
10892 uint8_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(pVmxTransient->uExitQualification);
10893 bool fIOWrite = ( VMX_EXIT_QUALIFICATION_IO_DIRECTION(pVmxTransient->uExitQualification)
10894 == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT);
10895 bool fIOString = VMX_EXIT_QUALIFICATION_IO_IS_STRING(pVmxTransient->uExitQualification);
10896 bool fStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
10897 AssertReturn(uIOWidth <= 3 && uIOWidth != 2, VERR_VMX_IPE_1);
10898
10899 /* I/O operation lookup arrays. */
10900 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses. */
10901 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving the result (in AL/AX/EAX). */
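    /* (uIOWidth is bits 2:0 of the exit qualification: 0 = 1-byte, 1 = 2-byte, 3 = 4-byte access; the
        value 2 is not defined, hence the unused middle entries above. E.g. "out dx, eax" yields
        uIOWidth=3, so cbValue=4 and the full 32-bit EAX value is used.) */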
10902
10903 VBOXSTRICTRC rcStrict;
10904 uint32_t const cbValue = s_aIOSizes[uIOWidth];
10905 uint32_t const cbInstr = pVmxTransient->cbInstr;
10906 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
10907 PVM pVM = pVCpu->CTX_SUFF(pVM);
10908 if (fIOString)
10909 {
10910#if 0 /* Not yet ready: causes IEM guru meditations with a Debian 32-bit guest without nested paging (on ATA reads). See @bugref{5752#c158}. */
10911 /*
10912 * INS/OUTS - I/O String instruction.
10913 *
10914 * Use instruction-information if available, otherwise fall back on
10915 * interpreting the instruction.
10916 */
10917 Log4(("CS:RIP=%04x:%#RX64 %#06x/%u %c str\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
10918 AssertReturn(pMixedCtx->dx == uIOPort, VERR_VMX_IPE_2);
10919 if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.Msrs.u64BasicInfo))
10920 {
10921 rc2 = hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
10922 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
10923 rc2 |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10924 AssertRCReturn(rc2, rc2);
10925 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
10926 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
10927 IEMMODE enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
10928 bool fRep = VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification);
10929 if (fIOWrite)
10930 {
10931 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
10932 pVmxTransient->ExitInstrInfo.StrIo.iSegReg);
10933 }
10934 else
10935 {
10936 /*
10937 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
10938 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
10939 * See Intel Instruction spec. for "INS".
10940 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
10941 */
10942 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr);
10943 }
10944 }
10945 else
10946 {
10947 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
10948 rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10949 AssertRCReturn(rc2, rc2);
10950 rcStrict = IEMExecOne(pVCpu);
10951 }
10952 /** @todo IEM needs to be setting these flags somehow. */
10953 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
10954 fUpdateRipAlready = true;
10955#else
10956 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
10957 rcStrict = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL /* pcbInstr */);
10958 if (RT_SUCCESS(rcStrict))
10959 {
10960 if (fIOWrite)
10961 {
10962 rcStrict = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
10963 (DISCPUMODE)pDis->uAddrMode, cbValue);
10964 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
10965 }
10966 else
10967 {
10968 rcStrict = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
10969 (DISCPUMODE)pDis->uAddrMode, cbValue);
10970 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
10971 }
10972 }
10973 else
10974 {
10975 AssertMsg(rcStrict == VERR_EM_INTERPRETER, ("rcStrict=%Rrc RIP %#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->rip));
10976 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
10977 }
10978#endif
10979 }
10980 else
10981 {
10982 /*
10983 * IN/OUT - I/O instruction.
10984 */
10985 Log4(("CS:RIP=%04x:%#RX64 %#06x/%u %c\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
10986 uint32_t const uAndVal = s_aIOOpAnd[uIOWidth];
10987 Assert(!VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification));
10988 if (fIOWrite)
10989 {
10990 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbValue);
10991 if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
10992 HMR0SavePendingIOPortWrite(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
10993 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
10994 }
10995 else
10996 {
10997 uint32_t u32Result = 0;
10998 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
10999 if (IOM_SUCCESS(rcStrict))
11000 {
11001 /* Save result of I/O IN instr. in AL/AX/EAX. */
11002 pMixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);
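                /* (E.g. a 1-byte "in al, dx" with eax=0x12345678 and u32Result=0xab uses uAndVal=0xff and
                    yields eax=0x123456ab, preserving the upper bytes just like a real AL write.) */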
11003 }
11004 else if (rcStrict == VINF_IOM_R3_IOPORT_READ)
11005 HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
11006 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
11007 }
11008 }
11009
11010 if (IOM_SUCCESS(rcStrict))
11011 {
11012 if (!fUpdateRipAlready)
11013 {
11014 pMixedCtx->rip += cbInstr;
11015 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
11016 }
11017
11018 /*
11019         * An INS/OUTS instruction with a REP prefix updates RFLAGS; this can be observed as a triple-fault guru meditation while booting a Fedora 17 64-bit guest.
11020 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
11021 */
11022 if (fIOString)
11023 {
11024 /** @todo Single-step for INS/OUTS with REP prefix? */
11025 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
11026 }
11027 else if (fStepping)
11028 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
11029
11030 /*
11031 * If any I/O breakpoints are armed, we need to check if one triggered
11032 * and take appropriate action.
11033 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
11034 */
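        /* (An I/O breakpoint is one whose DR7 R/W field is 10b; that encoding is defined only when
            CR4.DE is set, hence the CR4.DE check in the condition below.) */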
11035 rc2 = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
11036 AssertRCReturn(rc2, rc2);
11037
11038 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
11039 * execution engines about whether hyper BPs and such are pending. */
11040 uint32_t const uDr7 = pMixedCtx->dr[7];
11041 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
11042 && X86_DR7_ANY_RW_IO(uDr7)
11043 && (pMixedCtx->cr4 & X86_CR4_DE))
11044 || DBGFBpIsHwIoArmed(pVM)))
11045 {
11046 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
11047
11048 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
11049 VMMRZCallRing3Disable(pVCpu);
11050 HM_DISABLE_PREEMPT_IF_NEEDED();
11051
11052 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /*fDr6*/);
11053
11054 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pMixedCtx, uIOPort, cbValue);
11055 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
11056 {
11057 /* Raise #DB. */
11058 if (fIsGuestDbgActive)
11059 ASMSetDR6(pMixedCtx->dr[6]);
11060 if (pMixedCtx->dr[7] != uDr7)
11061 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
11062
11063 hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
11064 }
11065 /* rcStrict is VINF_SUCCESS or in [VINF_EM_FIRST..VINF_EM_LAST]. */
11066 else if ( rcStrict2 != VINF_SUCCESS
11067 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
11068 rcStrict = rcStrict2;
11069
11070 HM_RESTORE_PREEMPT_IF_NEEDED();
11071 VMMRZCallRing3Enable(pVCpu);
11072 }
11073 }
11074
11075#ifdef DEBUG
11076 if (rcStrict == VINF_IOM_R3_IOPORT_READ)
11077 Assert(!fIOWrite);
11078 else if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
11079 Assert(fIOWrite);
11080 else
11081 {
11082 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
11083 * statuses, that the VMM device and some others may return. See
11084 * IOM_SUCCESS() for guidance. */
11085 AssertMsg( RT_FAILURE(rcStrict)
11086 || rcStrict == VINF_SUCCESS
11087 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
11088 || rcStrict == VINF_EM_DBG_BREAKPOINT
11089 || rcStrict == VINF_EM_RAW_GUEST_TRAP
11090 || rcStrict == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
11091 }
11092#endif
11093
11094 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
11095 return VBOXSTRICTRC_TODO(rcStrict);
11096}
11097
11098
11099/**
11100 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
11101 * VM-exit.
11102 */
11103HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11104{
11105 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11106
11107    /* Check if this task-switch occurred while delivering an event through the guest IDT. */
11108 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11109 AssertRCReturn(rc, rc);
11110 if (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
11111 {
11112 rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
11113 AssertRCReturn(rc, rc);
11114 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
11115 {
11116 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
11117
11118 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
11119 bool fErrorCodeValid = VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo);
11120
11121 /* Save it as a pending event and it'll be converted to a TRPM event on the way out to ring-3. */
11122 Assert(!pVCpu->hm.s.Event.fPending);
11123 pVCpu->hm.s.Event.fPending = true;
11124 pVCpu->hm.s.Event.u64IntInfo = pVmxTransient->uIdtVectoringInfo;
11125 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
11126 AssertRCReturn(rc, rc);
11127 if (fErrorCodeValid)
11128 pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
11129 else
11130 pVCpu->hm.s.Event.u32ErrCode = 0;
11131 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
11132 && uVector == X86_XCPT_PF)
11133 {
11134 pVCpu->hm.s.Event.GCPtrFaultAddress = pMixedCtx->cr2;
11135 }
11136
11137 Log4(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector));
11138 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
11139 return VINF_EM_RAW_INJECT_TRPM_EVENT;
11140 }
11141 }
11142
11143 /** @todo Emulate task switch someday, currently just going back to ring-3 for
11144 * emulation. */
11145 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
11146 return VERR_EM_INTERPRETER;
11147}
11148
11149
11150/**
11151 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
11152 */
11153HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11154{
11155 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11156 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG);
11157 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
11158 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
11159 AssertRCReturn(rc, rc);
11160 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
11161 return VINF_EM_DBG_STEPPED;
11162}
11163
11164
11165/**
11166 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
11167 */
11168HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11169{
11170 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11171
11172 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
11173 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
11174 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
11175 return VINF_SUCCESS;
11176 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
11177 return rc;
11178
11179#if 0
11180 /** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now
11181 * just sync the whole thing. */
11182 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11183#else
11184 /* Aggressive state sync. for now. */
11185 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
11186 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11187 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11188#endif
11189 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11190 AssertRCReturn(rc, rc);
11191
11192    /* See Intel spec. Table 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses". */
11193 uint32_t uAccessType = VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
11194 switch (uAccessType)
11195 {
11196 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
11197 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
11198 {
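            /* (Offset 0x80 in the APIC-access page is the TPR. With the TPR shadow enabled the CPU is
                expected to virtualize such accesses itself, hence the assertion below.) */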
11199 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
11200 && VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) == 0x80)
11201 {
11202 AssertMsgFailed(("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
11203 }
11204
11205 RTGCPHYS GCPhys = pMixedCtx->msrApicBase; /* Always up-to-date, msrApicBase is not part of the VMCS. */
11206 GCPhys &= PAGE_BASE_GC_MASK;
11207 GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
11208 PVM pVM = pVCpu->CTX_SUFF(pVM);
11209            Log4(("ApicAccess uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
11210 VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
11211
11212 VBOXSTRICTRC rc2 = IOMMMIOPhysHandler(pVM, pVCpu,
11213 (uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ) ? 0 : X86_TRAP_PF_RW,
11214 CPUMCTX2CORE(pMixedCtx), GCPhys);
11215 rc = VBOXSTRICTRC_VAL(rc2);
11216 Log4(("ApicAccess rc=%d\n", rc));
11217 if ( rc == VINF_SUCCESS
11218 || rc == VERR_PAGE_TABLE_NOT_PRESENT
11219 || rc == VERR_PAGE_NOT_PRESENT)
11220 {
11221 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11222 | HM_CHANGED_GUEST_RSP
11223 | HM_CHANGED_GUEST_RFLAGS
11224 | HM_CHANGED_VMX_GUEST_APIC_STATE);
11225 rc = VINF_SUCCESS;
11226 }
11227 break;
11228 }
11229
11230 default:
11231 Log4(("ApicAccess uAccessType=%#x\n", uAccessType));
11232 rc = VINF_EM_RAW_EMULATE_INSTR;
11233 break;
11234 }
11235
11236 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
11237 return rc;
11238}
11239
11240
11241/**
11242 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
11243 * VM-exit.
11244 */
11245HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11246{
11247 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11248
11249 /* We should -not- get this VM-exit if the guest's debug registers were active. */
11250 if (pVmxTransient->fWasGuestDebugStateActive)
11251 {
11252 AssertMsgFailed(("Unexpected MOV DRx exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11253 HMVMX_RETURN_UNEXPECTED_EXIT();
11254 }
11255
11256 int rc = VERR_INTERNAL_ERROR_5;
11257 if ( !DBGFIsStepping(pVCpu)
11258 && !pVCpu->hm.s.fSingleInstruction
11259 && !pVmxTransient->fWasHyperDebugStateActive)
11260 {
11261 /* Don't intercept MOV DRx and #DB any more. */
11262 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
11263 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
11264 AssertRCReturn(rc, rc);
11265
11266 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
11267 {
11268#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
11269 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
11270 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
11271 AssertRCReturn(rc, rc);
11272#endif
11273 }
11274
11275 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
11276 VMMRZCallRing3Disable(pVCpu);
11277 HM_DISABLE_PREEMPT_IF_NEEDED();
11278
11279 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
11280 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
11281 Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
11282
11283 HM_RESTORE_PREEMPT_IF_NEEDED();
11284 VMMRZCallRing3Enable(pVCpu);
11285
11286#ifdef VBOX_WITH_STATISTICS
11287 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11288 AssertRCReturn(rc, rc);
11289 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
11290 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
11291 else
11292 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
11293#endif
11294 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
11295 return VINF_SUCCESS;
11296 }
11297
11298 /*
11299 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER, CS. EFER is always up-to-date.
11300 * Update the segment registers and DR7 from the CPU.
11301 */
11302 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11303 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11304 rc |= hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
11305 AssertRCReturn(rc, rc);
11306 Log4(("CS:RIP=%04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
11307
11308 PVM pVM = pVCpu->CTX_SUFF(pVM);
11309 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
11310 {
11311 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
11312 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification),
11313 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification));
11314 if (RT_SUCCESS(rc))
11315 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
11316 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
11317 }
11318 else
11319 {
11320 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
11321 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification),
11322 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification));
11323 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
11324 }
11325
11326 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
11327 if (RT_SUCCESS(rc))
11328 {
11329 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11330 AssertRCReturn(rc2, rc2);
11331 }
11332 return rc;
11333}
11334
11335
11336/**
11337 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
11338 * Conditional VM-exit.
11339 */
11340HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11341{
11342 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11343 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
11344
11345 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
11346 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
11347 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
11348 return VINF_SUCCESS;
11349 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
11350 return rc;
11351
11352 RTGCPHYS GCPhys = 0;
11353 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
11354
11355#if 0
11356 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
11357#else
11358 /* Aggressive state sync. for now. */
11359 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
11360 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11361 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11362#endif
11363 AssertRCReturn(rc, rc);
11364
11365 /*
11366 * If we succeed, resume guest execution.
11367 * If we fail in interpreting the instruction because we couldn't get the guest physical address
11368 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
11369 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
11370 * weird case. See @bugref{6043}.
11371 */
11372 PVM pVM = pVCpu->CTX_SUFF(pVM);
11373 VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
11374 rc = VBOXSTRICTRC_VAL(rc2);
11375    Log4(("EPT misconfig at %#RGp RIP=%#RX64 rc=%d\n", GCPhys, pMixedCtx->rip, rc));
11376 if ( rc == VINF_SUCCESS
11377 || rc == VERR_PAGE_TABLE_NOT_PRESENT
11378 || rc == VERR_PAGE_NOT_PRESENT)
11379 {
11380 /* Successfully handled MMIO operation. */
11381 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11382 | HM_CHANGED_GUEST_RSP
11383 | HM_CHANGED_GUEST_RFLAGS
11384 | HM_CHANGED_VMX_GUEST_APIC_STATE);
11385 rc = VINF_SUCCESS;
11386 }
11387 return rc;
11388}
11389
11390
11391/**
11392 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
11393 * VM-exit.
11394 */
11395HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11396{
11397 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11398 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
11399
11400 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
11401 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
11402 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
11403 return VINF_SUCCESS;
11404 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
11405 return rc;
11406
11407 RTGCPHYS GCPhys = 0;
11408 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
11409 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11410#if 0
11411 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
11412#else
11413 /* Aggressive state sync. for now. */
11414 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
11415 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11416 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11417#endif
11418 AssertRCReturn(rc, rc);
11419
11420 /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
11421 AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RX64\n", pVmxTransient->uExitQualification));
11422
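    /* Synthesize a #PF-style error code from the exit qualification bits so the nested-paging handler below can treat this EPT violation like an ordinary page fault. */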
11423 RTGCUINT uErrorCode = 0;
11424 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH)
11425 uErrorCode |= X86_TRAP_PF_ID;
11426 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE)
11427 uErrorCode |= X86_TRAP_PF_RW;
11428 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
11429 uErrorCode |= X86_TRAP_PF_P;
11430
11431 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
11432
11433 Log4(("EPT violation %#RX64 at %#RGp ErrorCode %#x CS:EIP=%04x:%#RX64\n", pVmxTransient->uExitQualification, GCPhys,
11434 uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
11435
11436 /* Handle the pagefault trap for the nested shadow table. */
11437 PVM pVM = pVCpu->CTX_SUFF(pVM);
11438 rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pMixedCtx), GCPhys);
11439 TRPMResetTrap(pVCpu);
11440
11441 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
11442 if ( rc == VINF_SUCCESS
11443 || rc == VERR_PAGE_TABLE_NOT_PRESENT
11444 || rc == VERR_PAGE_NOT_PRESENT)
11445 {
11446 /* Successfully synced our nested page tables. */
11447 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
11448 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11449 | HM_CHANGED_GUEST_RSP
11450 | HM_CHANGED_GUEST_RFLAGS);
11451 return VINF_SUCCESS;
11452 }
11453
11454 Log4(("EPT return to ring-3 rc=%Rrc\n", rc));
11455 return rc;
11456}
11457
11458/** @} */
11459
11460/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
11461/* -=-=-=-=-=-=-=-=-=- VM-exit Exception Handlers -=-=-=-=-=-=-=-=-=-=- */
11462/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
11463
11464/** @name VM-exit exception handlers.
11465 * @{
11466 */
11467
11468/**
11469 * VM-exit exception handler for #MF (Math Fault: floating point exception).
11470 */
11471static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11472{
11473 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11474 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
11475
11476 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
11477 AssertRCReturn(rc, rc);
11478
11479 if (!(pMixedCtx->cr0 & X86_CR0_NE))
11480 {
11481 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
11482 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
11483
11484 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
11485 * provides VM-exit instruction length. If this causes problems later,
11486 * disassemble the instruction like it's done on AMD-V. */
11487 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11488 AssertRCReturn(rc2, rc2);
11489 return rc;
11490 }
11491
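    /* CR0.NE is set: deliver the #MF to the guest as a regular exception. */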
11492 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11493 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11494 return rc;
11495}
11496
11497
11498/**
11499 * VM-exit exception handler for #BP (Breakpoint exception).
11500 */
11501static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11502{
11503 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11504 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
11505
11506 /** @todo Try to optimize this by not saving the entire guest state unless
11507 * really needed. */
11508 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11509 AssertRCReturn(rc, rc);
11510
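    /* Give DBGF first shot at the breakpoint; VINF_EM_RAW_GUEST_TRAP means it is not a debugger breakpoint and must be reflected to the guest. */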
11511 PVM pVM = pVCpu->CTX_SUFF(pVM);
11512 rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
11513 if (rc == VINF_EM_RAW_GUEST_TRAP)
11514 {
11515 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11516 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11517 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11518 AssertRCReturn(rc, rc);
11519
11520 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11521 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11522 }
11523
11524 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
11525 return rc;
11526}
11527
11528
11529/**
11530 * VM-exit exception handler for #DB (Debug exception).
11531 */
11532static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11533{
11534 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11535 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
11536 Log6(("XcptDB\n"));
11537
11538 /*
11539  * Get the DR6-like value from the exit qualification and pass it to DBGF
11540  * for processing.
11541 */
11542 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11543 AssertRCReturn(rc, rc);
11544
11545 /* See Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
11546 uint64_t uDR6 = X86_DR6_INIT_VAL;
11547 uDR6 |= ( pVmxTransient->uExitQualification
11548 & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
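    /* E.g. hitting a breakpoint armed in DR1 while single-stepping would report B1 and BS in the exit qualification, and hence in uDR6, here. */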
11549
11550 rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6, pVCpu->hm.s.fSingleInstruction);
11551 if (rc == VINF_EM_RAW_GUEST_TRAP)
11552 {
11553 /*
11554 * The exception was for the guest. Update DR6, DR7.GD and
11555 * IA32_DEBUGCTL.LBR before forwarding it.
11556 * (See Intel spec. 27.1 "Architectural State before a VM-Exit".)
11557 */
11558 VMMRZCallRing3Disable(pVCpu);
11559 HM_DISABLE_PREEMPT_IF_NEEDED();
11560
11561 pMixedCtx->dr[6] &= ~X86_DR6_B_MASK;
11562 pMixedCtx->dr[6] |= uDR6;
11563 if (CPUMIsGuestDebugStateActive(pVCpu))
11564 ASMSetDR6(pMixedCtx->dr[6]);
11565
11566 HM_RESTORE_PREEMPT_IF_NEEDED();
11567 VMMRZCallRing3Enable(pVCpu);
11568
11569 rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
11570 AssertRCReturn(rc, rc);
11571
11572 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
11573 pMixedCtx->dr[7] &= ~X86_DR7_GD;
11574
11575 /* Paranoia. */
11576 pMixedCtx->dr[7] &= ~X86_DR7_RAZ_MASK;
11577 pMixedCtx->dr[7] |= X86_DR7_RA1_MASK;
11578
11579 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pMixedCtx->dr[7]);
11580 AssertRCReturn(rc, rc);
11581
11582 /*
11583 * Raise #DB in the guest.
11584 *
11585 * It is important to reflect what the VM-exit gave us (preserving the interruption-type) rather than use
11586 * hmR0VmxSetPendingXcptDB() as the #DB could've been raised while executing ICEBP and not the 'normal' #DB.
11587 * Thus it -may- trigger different handling in the CPU (like skipped DPL checks). See @bugref{6398}.
11588 *
11589 * Since ICEBP isn't documented on Intel, see AMD spec. 15.20 "Event Injection".
11590 */
11591 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11592 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11593 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11594 AssertRCReturn(rc, rc);
11595 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11596 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11597 return VINF_SUCCESS;
11598 }
11599
11600 /*
11601  * Not a guest trap, so it must be a hypervisor-related debug event.
11602  * Update DR6 in case someone is interested in it.
11603 */
11604 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
11605 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
11606 CPUMSetHyperDR6(pVCpu, uDR6);
11607
11608 return rc;
11609}
11610
11611
11612/**
11613 * VM-exit exception handler for #NM (Device-not-available exception, raised
11614 * when the guest accesses the FPU while it is unavailable).
11615 */
11616static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11617{
11618 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11619
11620 /* We require CR0 and EFER. EFER is always up-to-date. */
11621 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
11622 AssertRCReturn(rc, rc);
11623
11624 /* We're playing with the host CPU state here, have to disable preemption or longjmp. */
11625 VMMRZCallRing3Disable(pVCpu);
11626 HM_DISABLE_PREEMPT_IF_NEEDED();
11627
11628 /* If the guest FPU was active at the time of the #NM exit, then it's a guest fault. */
11629 if (pVmxTransient->fWasGuestFPUStateActive)
11630 {
11631 rc = VINF_EM_RAW_GUEST_TRAP;
11632 Assert(CPUMIsGuestFPUStateActive(pVCpu) || HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0));
11633 }
11634 else
11635 {
11636#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
11637 Assert(!pVmxTransient->fWasGuestFPUStateActive);
11638#endif
11639 rc = CPUMR0Trap07Handler(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
11640 Assert(rc == VINF_EM_RAW_GUEST_TRAP || (rc == VINF_SUCCESS && CPUMIsGuestFPUStateActive(pVCpu)));
11641 }
11642
11643 HM_RESTORE_PREEMPT_IF_NEEDED();
11644 VMMRZCallRing3Enable(pVCpu);
11645
11646 if (rc == VINF_SUCCESS)
11647 {
11648 /* Guest FPU state was activated, we'll want to change CR0 FPU intercepts before the next VM-reentry. */
11649 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
11650 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
11651 pVCpu->hm.s.fUseGuestFpu = true;
11652 }
11653 else
11654 {
11655 /* Forward #NM to the guest. */
11656 Assert(rc == VINF_EM_RAW_GUEST_TRAP);
11657 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11658 AssertRCReturn(rc, rc);
11659 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11660 pVmxTransient->cbInstr, 0 /* error code */, 0 /* GCPtrFaultAddress */);
11661 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
11662 }
11663
11664 return VINF_SUCCESS;
11665}
11666
11667
11668/**
11669 * VM-exit exception handler for #GP (General-protection exception).
11670 *
11671 * @remarks Requires pVmxTransient->uExitIntInfo to be up-to-date.
11672 */
11673static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11674{
11675 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11676 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
11677
11678 int rc = VERR_INTERNAL_ERROR_5;
11679 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
11680 {
11681#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
11682 /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
11683 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11684 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11685 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11686 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11687 AssertRCReturn(rc, rc);
11688 Log4(("#GP Gst: CS:RIP %04x:%#RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pMixedCtx->cs.Sel, pMixedCtx->rip,
11689 pVmxTransient->uExitIntErrorCode, pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu), pMixedCtx->tr.Sel));
11690 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11691 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11692 return rc;
11693#else
11694 /* We don't intercept #GP. */
11695 AssertMsgFailed(("Unexpected VM-exit caused by #GP exception\n"));
11696 NOREF(pVmxTransient);
11697 return VERR_VMX_UNEXPECTED_EXCEPTION;
11698#endif
11699 }
11700
11701 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
11702 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
11703
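    /* Without unrestricted guest execution, real-mode code runs in virtual-8086 mode where privileged and IOPL-sensitive instructions (CLI, STI, PUSHF, POPF, INT, IRET) raise #GP; the common ones are emulated below. */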
11704 /* EMInterpretDisasCurrent() requires a lot of state, so save the entire guest state. */
11705 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11706 AssertRCReturn(rc, rc);
11707
11708 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
11709 uint32_t cbOp = 0;
11710 PVM pVM = pVCpu->CTX_SUFF(pVM);
11711 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
11712 if (RT_SUCCESS(rc))
11713 {
11714 rc = VINF_SUCCESS;
11715 Assert(cbOp == pDis->cbInstr);
11716 Log4(("#GP Disas OpCode=%u CS:EIP %04x:%#RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
11717 switch (pDis->pCurInstr->uOpcode)
11718 {
11719 case OP_CLI:
11720 {
11721 pMixedCtx->eflags.Bits.u1IF = 0;
11722 pMixedCtx->rip += pDis->cbInstr;
11723 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11724 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
11725 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
11726 break;
11727 }
11728
11729 case OP_STI:
11730 {
11731 pMixedCtx->eflags.Bits.u1IF = 1;
11732 pMixedCtx->rip += pDis->cbInstr;
11733 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
11734 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
11735 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11736 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
11737 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
11738 break;
11739 }
11740
11741 case OP_HLT:
11742 {
11743 rc = VINF_EM_HALT;
11744 pMixedCtx->rip += pDis->cbInstr;
11745 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
11746 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
11747 break;
11748 }
11749
11750 case OP_POPF:
11751 {
11752 Log4(("POPF CS:RIP %04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
11753 uint32_t cbParm;
11754 uint32_t uMask;
11755 bool fStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
11756 if (pDis->fPrefix & DISPREFIX_OPSIZE)
11757 {
11758 cbParm = 4;
11759 uMask = 0xffffffff;
11760 }
11761 else
11762 {
11763 cbParm = 2;
11764 uMask = 0xffff;
11765 }
11766
11767 /* Get the stack pointer & pop the contents of the stack onto Eflags. */
11768 RTGCPTR GCPtrStack = 0;
11769 X86EFLAGS Eflags;
11770 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
11771 &GCPtrStack);
11772 if (RT_SUCCESS(rc))
11773 {
11774 Assert(sizeof(Eflags.u32) >= cbParm);
11775 Eflags.u32 = 0;
11776 rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u32, cbParm);
11777 }
11778 if (RT_FAILURE(rc))
11779 {
11780 rc = VERR_EM_INTERPRETER;
11781 break;
11782 }
11783 Log4(("POPF %#x -> %#RX64 mask=%#x RIP=%#RX64\n", Eflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
11784 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
11785 | (Eflags.u32 & X86_EFL_POPF_BITS & uMask);
11786 pMixedCtx->eflags.Bits.u1RF = 0; /* The RF bit is always cleared by POPF; see Intel Instruction reference. */
11787 pMixedCtx->esp += cbParm;
11788 pMixedCtx->esp &= uMask;
11789 pMixedCtx->rip += pDis->cbInstr;
11790 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11791 | HM_CHANGED_GUEST_RSP
11792 | HM_CHANGED_GUEST_RFLAGS);
11793 /* Generate a pending-debug exception when stepping over POPF regardless of how POPF modifies EFLAGS.TF. */
11794 if (fStepping)
11795 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
11796
11797 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
11798 break;
11799 }
11800
11801 case OP_PUSHF:
11802 {
11803 uint32_t cbParm;
11804 uint32_t uMask;
11805 if (pDis->fPrefix & DISPREFIX_OPSIZE)
11806 {
11807 cbParm = 4;
11808 uMask = 0xffffffff;
11809 }
11810 else
11811 {
11812 cbParm = 2;
11813 uMask = 0xffff;
11814 }
11815
11816 /* Get the stack pointer & push the contents of eflags onto the stack. */
11817 RTGCPTR GCPtrStack = 0;
11818 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), (pMixedCtx->esp - cbParm) & uMask,
11819 SELMTOFLAT_FLAGS_CPL0, &GCPtrStack);
11820 if (RT_FAILURE(rc))
11821 {
11822 rc = VERR_EM_INTERPRETER;
11823 break;
11824 }
11825 X86EFLAGS Eflags = pMixedCtx->eflags;
11826 /* The RF & VM bits are cleared in the EFLAGS image stored on the stack; see Intel instruction reference for PUSHF. */
11827 Eflags.Bits.u1RF = 0;
11828 Eflags.Bits.u1VM = 0;
11829
11830 rc = PGMPhysWrite(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u, cbParm);
11831 if (RT_FAILURE(rc))
11832 {
11833 rc = VERR_EM_INTERPRETER;
11834 break;
11835 }
11836 Log4(("PUSHF %#x -> %#RGv\n", Eflags.u, GCPtrStack));
11837 pMixedCtx->esp -= cbParm;
11838 pMixedCtx->esp &= uMask;
11839 pMixedCtx->rip += pDis->cbInstr;
11840 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP);
11841 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
11842 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
11843 break;
11844 }
11845
11846 case OP_IRET:
11847 {
11848 /** @todo Handle 32-bit operand sizes and check stack limits. See Intel
11849 * instruction reference. */
11850 RTGCPTR GCPtrStack = 0;
11851 uint32_t uMask = 0xffff;
11852 bool fStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
11853 uint16_t aIretFrame[3];
11854 if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE))
11855 {
11856 rc = VERR_EM_INTERPRETER;
11857 break;
11858 }
11859 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
11860 &GCPtrStack);
11861 if (RT_SUCCESS(rc))
11862 rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &aIretFrame[0], sizeof(aIretFrame));
11863 if (RT_FAILURE(rc))
11864 {
11865 rc = VERR_EM_INTERPRETER;
11866 break;
11867 }
11868 pMixedCtx->eip = 0;
11869 pMixedCtx->ip = aIretFrame[0];
11870 pMixedCtx->cs.Sel = aIretFrame[1];
11871 pMixedCtx->cs.ValidSel = aIretFrame[1];
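                /* In real mode the CS base is simply the selector shifted left by 4 (Sel * 16). */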
11872 pMixedCtx->cs.u64Base = (uint64_t)pMixedCtx->cs.Sel << 4;
11873 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
11874 | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
11875 pMixedCtx->sp += sizeof(aIretFrame);
11876 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11877 | HM_CHANGED_GUEST_SEGMENT_REGS
11878 | HM_CHANGED_GUEST_RSP
11879 | HM_CHANGED_GUEST_RFLAGS);
11880 /* Generate a pending-debug exception when stepping over IRET regardless of how IRET modifies EFLAGS.TF. */
11881 if (fStepping)
11882 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
11883 Log4(("IRET %#RGv to %04x:%x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
11884 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
11885 break;
11886 }
11887
11888 case OP_INT:
11889 {
11890 uint16_t uVector = pDis->Param1.uValue & 0xff;
11891 hmR0VmxSetPendingIntN(pVCpu, pMixedCtx, uVector, pDis->cbInstr);
11892 /* INT clears EFLAGS.TF, we mustn't set any pending debug exceptions here. */
11893 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
11894 break;
11895 }
11896
11897 case OP_INTO:
11898 {
11899 if (pMixedCtx->eflags.Bits.u1OF)
11900 {
11901 hmR0VmxSetPendingXcptOF(pVCpu, pMixedCtx, pDis->cbInstr);
11902 /* INTO clears EFLAGS.TF, we mustn't set any pending debug exceptions here. */
11903 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
11904 }
11905 break;
11906 }
11907
11908 default:
11909 {
11910 VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pMixedCtx), 0 /* pvFault */,
11911 EMCODETYPE_SUPERVISOR);
11912 rc = VBOXSTRICTRC_VAL(rc2);
11913 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
11914 /** @todo We have to set pending-debug exceptions here when the guest is
11915 * single-stepping depending on the instruction that was interpreted. */
11916 Log4(("#GP rc=%Rrc\n", rc));
11917 break;
11918 }
11919 }
11920 }
11921 else
11922 rc = VERR_EM_INTERPRETER;
11923
11924 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_EM_HALT,
11925 ("#GP Unexpected rc=%Rrc\n", rc));
11926 return rc;
11927}
11928
11929
11930#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
11931/**
11932 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
11933 * the exception reported in the VMX transient structure back into the VM.
11934 *
11935 * @remarks Requires uExitIntInfo in the VMX transient structure to be
11936 * up-to-date.
11937 */
11938static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11939{
11940 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11941
11942 /* Re-inject the exception into the guest. This cannot be a double-fault condition which would have been handled in
11943 hmR0VmxCheckExitDueToEventDelivery(). */
11944 int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11945 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11946 AssertRCReturn(rc, rc);
11947 Assert(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);
11948
11949#ifdef DEBUG_ramshankar
11950 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11951 uint8_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11952 Log(("hmR0VmxExitXcptGeneric: Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pMixedCtx->cs.Sel, pMixedCtx->rip));
11953#endif
11954
11955 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11956 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11957 return VINF_SUCCESS;
11958}
11959#endif
11960
11961
11962/**
11963 * VM-exit exception handler for #PF (Page-fault exception).
11964 */
11965static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11966{
11967 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11968 PVM pVM = pVCpu->CTX_SUFF(pVM);
11969 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11970 rc |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11971 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11972 AssertRCReturn(rc, rc);
11973
11974#if defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) || defined(HMVMX_ALWAYS_TRAP_PF)
11975 if (pVM->hm.s.fNestedPaging)
11976 {
11977 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
11978 if (RT_LIKELY(!pVmxTransient->fVectoringPF))
11979 {
11980 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
11981 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11982 0 /* cbInstr */, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQualification);
11983 }
11984 else
11985 {
11986 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
11987 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
11988 Log4(("Pending #DF due to vectoring #PF. NP\n"));
11989 }
11990 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
11991 return rc;
11992 }
11993#else
11994 Assert(!pVM->hm.s.fNestedPaging);
11995 NOREF(pVM);
11996#endif
11997
11998 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11999 AssertRCReturn(rc, rc);
12000
12001 Log4(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification,
12002 pMixedCtx->cs.Sel, pMixedCtx->rip, pVmxTransient->uExitIntErrorCode, pMixedCtx->cr3));
12003
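    /* Forward the #PF to PGM: it will sync the shadow page tables, emulate the MMIO access, or report that the fault belongs to the guest. */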
12004 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
12005 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pMixedCtx),
12006 (RTGCPTR)pVmxTransient->uExitQualification);
12007
12008 Log4(("#PF: rc=%Rrc\n", rc));
12009 if (rc == VINF_SUCCESS)
12010 {
12011 /* Successfully synced shadow page tables or emulated an MMIO instruction. */
12012 /** @todo this isn't quite right, what if guest does lgdt with some MMIO
12013 * memory? We don't update the whole state here... */
12014 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
12015 | HM_CHANGED_GUEST_RSP
12016 | HM_CHANGED_GUEST_RFLAGS
12017 | HM_CHANGED_VMX_GUEST_APIC_STATE);
12018 TRPMResetTrap(pVCpu);
12019 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
12020 return rc;
12021 }
12022 else if (rc == VINF_EM_RAW_GUEST_TRAP)
12023 {
12024 if (!pVmxTransient->fVectoringPF)
12025 {
12026 /* It's a guest page fault and needs to be reflected to the guest. */
12027 uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
12028 TRPMResetTrap(pVCpu);
12029 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
12030 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
12031 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12032 0 /* cbInstr */, uGstErrorCode, pVmxTransient->uExitQualification);
12033 }
12034 else
12035 {
12036 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
12037 TRPMResetTrap(pVCpu);
12038 pVCpu->hm.s.Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
12039 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
12040 Log4(("#PF: Pending #DF due to vectoring #PF\n"));
12041 }
12042
12043 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
12044 return VINF_SUCCESS;
12045 }
12046
12047 TRPMResetTrap(pVCpu);
12048 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
12049 return rc;
12050}
12051
12052/** @} */
12053