VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp@ 47705

Last change on this file since 47705 was 47687, checked in by vboxsync, 11 years ago

VMM/HMVMXR0: Added segment checks to hmR0VmxCheckGuestState() while using unrestricted execution.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 425.9 KB
1/* $Id: HMVMXR0.cpp 47687 2013-08-13 12:10:19Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2012-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_HM
22#include <iprt/asm-amd64-x86.h>
23#include <iprt/thread.h>
24#include <iprt/string.h>
25
26#include "HMInternal.h"
27#include <VBox/vmm/vm.h>
28#include "HMVMXR0.h"
29#include <VBox/vmm/pdmapi.h>
30#include <VBox/vmm/dbgf.h>
31#include <VBox/vmm/iem.h>
32#include <VBox/vmm/iom.h>
33#include <VBox/vmm/selm.h>
34#include <VBox/vmm/tm.h>
35#ifdef VBOX_WITH_REM
36# include <VBox/vmm/rem.h>
37#endif
38#ifdef DEBUG_ramshankar
39#define HMVMX_SAVE_FULL_GUEST_STATE
40#define HMVMX_SYNC_FULL_GUEST_STATE
41#define HMVMX_ALWAYS_CHECK_GUEST_STATE
42#define HMVMX_ALWAYS_TRAP_ALL_XCPTS
43#define HMVMX_ALWAYS_TRAP_PF
44#endif
45
46
47/*******************************************************************************
48* Defined Constants And Macros *
49*******************************************************************************/
50#if defined(RT_ARCH_AMD64)
51# define HMVMX_IS_64BIT_HOST_MODE() (true)
52typedef RTHCUINTREG HMVMXHCUINTREG;
53#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
54extern "C" uint32_t g_fVMXIs64bitHost;
55# define HMVMX_IS_64BIT_HOST_MODE() (g_fVMXIs64bitHost != 0)
56typedef uint64_t HMVMXHCUINTREG;
57#else
58# define HMVMX_IS_64BIT_HOST_MODE() (false)
59typedef RTHCUINTREG HMVMXHCUINTREG;
60#endif
61
62/** Use the function table. */
63#define HMVMX_USE_FUNCTION_TABLE
64
65/** Determine which tagged-TLB flush handler to use. */
66#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
67#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
68#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
69#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
70
71/** @name Updated-guest-state flags.
72 * @{ */
73#define HMVMX_UPDATED_GUEST_RIP RT_BIT(0)
74#define HMVMX_UPDATED_GUEST_RSP RT_BIT(1)
75#define HMVMX_UPDATED_GUEST_RFLAGS RT_BIT(2)
76#define HMVMX_UPDATED_GUEST_CR0 RT_BIT(3)
77#define HMVMX_UPDATED_GUEST_CR3 RT_BIT(4)
78#define HMVMX_UPDATED_GUEST_CR4 RT_BIT(5)
79#define HMVMX_UPDATED_GUEST_GDTR RT_BIT(6)
80#define HMVMX_UPDATED_GUEST_IDTR RT_BIT(7)
81#define HMVMX_UPDATED_GUEST_LDTR RT_BIT(8)
82#define HMVMX_UPDATED_GUEST_TR RT_BIT(9)
83#define HMVMX_UPDATED_GUEST_SEGMENT_REGS RT_BIT(10)
84#define HMVMX_UPDATED_GUEST_DEBUG RT_BIT(11)
85#define HMVMX_UPDATED_GUEST_FS_BASE_MSR RT_BIT(12)
86#define HMVMX_UPDATED_GUEST_GS_BASE_MSR RT_BIT(13)
87#define HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR RT_BIT(14)
88#define HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR RT_BIT(15)
89#define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR RT_BIT(16)
90#define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS RT_BIT(17)
91#define HMVMX_UPDATED_GUEST_ACTIVITY_STATE RT_BIT(18)
92#define HMVMX_UPDATED_GUEST_APIC_STATE RT_BIT(19)
93#define HMVMX_UPDATED_GUEST_ALL ( HMVMX_UPDATED_GUEST_RIP \
94 | HMVMX_UPDATED_GUEST_RSP \
95 | HMVMX_UPDATED_GUEST_RFLAGS \
96 | HMVMX_UPDATED_GUEST_CR0 \
97 | HMVMX_UPDATED_GUEST_CR3 \
98 | HMVMX_UPDATED_GUEST_CR4 \
99 | HMVMX_UPDATED_GUEST_GDTR \
100 | HMVMX_UPDATED_GUEST_IDTR \
101 | HMVMX_UPDATED_GUEST_LDTR \
102 | HMVMX_UPDATED_GUEST_TR \
103 | HMVMX_UPDATED_GUEST_SEGMENT_REGS \
104 | HMVMX_UPDATED_GUEST_DEBUG \
105 | HMVMX_UPDATED_GUEST_FS_BASE_MSR \
106 | HMVMX_UPDATED_GUEST_GS_BASE_MSR \
107 | HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR \
108 | HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR \
109 | HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR \
110 | HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS \
111 | HMVMX_UPDATED_GUEST_ACTIVITY_STATE \
112 | HMVMX_UPDATED_GUEST_APIC_STATE)
113/** @} */
114
115/** @name VMX transient flags.
116 * Flags to skip redundant reads of some common VMCS fields that are not part of
117 * the guest-CPU state but are in the transient structure.
118 * @{ */
119#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO RT_BIT(0)
120#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE RT_BIT(1)
121#define HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION RT_BIT(2)
122#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN RT_BIT(3)
123#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO RT_BIT(4)
124#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE RT_BIT(5)
#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO RT_BIT(6)
125/** @} */
126
127/**
128 * Exception bitmap mask for real-mode guests (real-on-v86).
129 *
130 * We need to intercept all exceptions manually (except #PF). #NM is also
131 * handled separately, see hmR0VmxLoadGuestControlRegs(). #PF need not be
132 * intercepted even in real-mode if we have Nested Paging support.
133 */
134#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) | RT_BIT(X86_XCPT_DB) | RT_BIT(X86_XCPT_NMI) \
135 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
136 | RT_BIT(X86_XCPT_UD) /* RT_BIT(X86_XCPT_NM) */ | RT_BIT(X86_XCPT_DF) \
137 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
138 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
139 | RT_BIT(X86_XCPT_MF) | RT_BIT(X86_XCPT_AC) | RT_BIT(X86_XCPT_MC) \
140 | RT_BIT(X86_XCPT_XF))
141
142/**
143 * Exception bitmap mask for all contributory exceptions.
144 *
145 * Page fault is deliberately excluded here as it's conditional as to whether
146 * it's contributory or benign. Page faults are handled separately.
147 */
148#define HMVMX_CONTRIBUTORY_XCPT_MASK ( RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
149 | RT_BIT(X86_XCPT_DE))
150
151/** Maximum VM-instruction error number. */
152#define HMVMX_INSTR_ERROR_MAX 28
153
154/** Profiling macro. */
155#ifdef HM_PROFILE_EXIT_DISPATCH
156# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
157# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
158#else
159# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
160# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
161#endif
162
163
164/*******************************************************************************
165* Structures and Typedefs *
166*******************************************************************************/
167/**
168 * VMX transient state.
169 *
170 * A state structure for holding miscellaneous information across
171 * VMX non-root operation and restored after the transition.
172 */
173typedef struct VMXTRANSIENT
174{
175 /** The host's rflags/eflags. */
176 RTCCUINTREG uEFlags;
177#if HC_ARCH_BITS == 32
178 uint32_t u32Alignment0;
179#endif
180 /** The guest's LSTAR MSR value used for TPR patching for 32-bit guests. */
181 uint64_t u64LStarMsr;
182 /** The guest's TPR value used for TPR shadowing. */
183 uint8_t u8GuestTpr;
184 /** Alignment. */
185 uint8_t abAlignment0[7];
186
187 /** The basic VM-exit reason. */
188 uint16_t uExitReason;
189 /** Alignment. */
190 uint16_t u16Alignment0;
191 /** The VM-exit interruption error code. */
192 uint32_t uExitIntrErrorCode;
193 /** The VM-exit exit qualification. */
194 uint64_t uExitQualification;
195
196 /** The VM-exit interruption-information field. */
197 uint32_t uExitIntrInfo;
198 /** The VM-exit instruction-length field. */
199 uint32_t cbInstr;
200 /** The VM-exit instruction-information field. */
201 union
202 {
203 /** Plain unsigned int representation. */
204 uint32_t u;
205 /** INS and OUTS information. */
206 struct
207 {
208 uint32_t u6Reserved0 : 6;
209 /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
210 uint32_t u3AddrSize : 3;
211 uint32_t u5Reserved1 : 5;
212 /** The segment register (X86_SREG_XXX). */
213 uint32_t iSegReg : 3;
214 uint32_t uReserved2 : 14;
215 } StrIo;
216 } ExitInstrInfo;
217 /** Whether the VM-entry failed or not. */
218 bool fVMEntryFailed;
219 /** Alignment. */
220 uint8_t abAlignment1[3];
221
222 /** The VM-entry interruption-information field. */
223 uint32_t uEntryIntrInfo;
224 /** The VM-entry exception error code field. */
225 uint32_t uEntryXcptErrorCode;
226 /** The VM-entry instruction length field. */
227 uint32_t cbEntryInstr;
228
229 /** IDT-vectoring information field. */
230 uint32_t uIdtVectoringInfo;
231 /** IDT-vectoring error code. */
232 uint32_t uIdtVectoringErrorCode;
233
234 /** Mask of currently read VMCS fields; HMVMX_UPDATED_TRANSIENT_*. */
235 uint32_t fVmcsFieldsRead;
236 /** Whether TSC-offsetting should be setup before VM-entry. */
237 bool fUpdateTscOffsettingAndPreemptTimer;
238 /** Whether the VM-exit was caused by a page-fault during delivery of a
239 * contributory exception or a page-fault. */
240 bool fVectoringPF;
241} VMXTRANSIENT;
242AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, sizeof(uint64_t));
243AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntrInfo, sizeof(uint64_t));
244AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntrInfo, sizeof(uint64_t));
245AssertCompileMemberSize(VMXTRANSIENT, ExitInstrInfo, sizeof(uint32_t));
246/** Pointer to VMX transient state. */
247typedef VMXTRANSIENT *PVMXTRANSIENT;
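/* Illustrative sketch (not verbatim from this file): how the StrIo bit-fields above
 * would typically be consumed after a string I/O VM-exit, assuming the
 * instruction-information field has already been read into ExitInstrInfo:
 *
 *     uint8_t const iSegReg    = pVmxTransient->ExitInstrInfo.StrIo.iSegReg;        // X86_SREG_XXX
 *     bool const    f64BitAddr = (pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize == 2);
 */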
248
249
250/**
251 * MSR-bitmap read permissions.
252 */
253typedef enum VMXMSREXITREAD
254{
255 /** Reading this MSR causes a VM-exit. */
256 VMXMSREXIT_INTERCEPT_READ = 0xb,
257 /** Reading this MSR does not cause a VM-exit. */
258 VMXMSREXIT_PASSTHRU_READ
259} VMXMSREXITREAD;
260
261/**
262 * MSR-bitmap write permissions.
263 */
264typedef enum VMXMSREXITWRITE
265{
266 /** Writing to this MSR causes a VM-exit. */
267 VMXMSREXIT_INTERCEPT_WRITE = 0xd,
268 /** Writing to this MSR does not cause a VM-exit. */
269 VMXMSREXIT_PASSTHRU_WRITE
270} VMXMSREXITWRITE;
271
272/**
273 * VM-exit handler.
274 *
275 * @returns VBox status code.
276 * @param pVCpu Pointer to the VMCPU.
277 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
278 * out-of-sync. Make sure to update the required
279 * fields before using them.
280 * @param pVmxTransient Pointer to the VMX-transient structure.
281 */
282#ifndef HMVMX_USE_FUNCTION_TABLE
283typedef int FNVMEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
284#else
285typedef DECLCALLBACK(int) FNVMEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
286/** Pointer to VM-exit handler. */
287typedef FNVMEXITHANDLER *PFNVMEXITHANDLER;
288#endif
289
290
291/*******************************************************************************
292* Internal Functions *
293*******************************************************************************/
294static void hmR0VmxFlushEpt(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush);
295static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr);
296static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
297 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntrState);
298#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
299static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
300#endif
301#ifndef HMVMX_USE_FUNCTION_TABLE
302DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
303# define HMVMX_EXIT_DECL static int
304#else
305# define HMVMX_EXIT_DECL static DECLCALLBACK(int)
306#endif
307
308/** @name VM-exit handlers.
309 * @{
310 */
311static FNVMEXITHANDLER hmR0VmxExitXcptOrNmi;
312static FNVMEXITHANDLER hmR0VmxExitExtInt;
313static FNVMEXITHANDLER hmR0VmxExitTripleFault;
314static FNVMEXITHANDLER hmR0VmxExitInitSignal;
315static FNVMEXITHANDLER hmR0VmxExitSipi;
316static FNVMEXITHANDLER hmR0VmxExitIoSmi;
317static FNVMEXITHANDLER hmR0VmxExitSmi;
318static FNVMEXITHANDLER hmR0VmxExitIntWindow;
319static FNVMEXITHANDLER hmR0VmxExitNmiWindow;
320static FNVMEXITHANDLER hmR0VmxExitTaskSwitch;
321static FNVMEXITHANDLER hmR0VmxExitCpuid;
322static FNVMEXITHANDLER hmR0VmxExitGetsec;
323static FNVMEXITHANDLER hmR0VmxExitHlt;
324static FNVMEXITHANDLER hmR0VmxExitInvd;
325static FNVMEXITHANDLER hmR0VmxExitInvlpg;
326static FNVMEXITHANDLER hmR0VmxExitRdpmc;
327static FNVMEXITHANDLER hmR0VmxExitRdtsc;
328static FNVMEXITHANDLER hmR0VmxExitRsm;
329static FNVMEXITHANDLER hmR0VmxExitSetPendingXcptUD;
330static FNVMEXITHANDLER hmR0VmxExitMovCRx;
331static FNVMEXITHANDLER hmR0VmxExitMovDRx;
332static FNVMEXITHANDLER hmR0VmxExitIoInstr;
333static FNVMEXITHANDLER hmR0VmxExitRdmsr;
334static FNVMEXITHANDLER hmR0VmxExitWrmsr;
335static FNVMEXITHANDLER hmR0VmxExitErrInvalidGuestState;
336static FNVMEXITHANDLER hmR0VmxExitErrMsrLoad;
337static FNVMEXITHANDLER hmR0VmxExitErrUndefined;
338static FNVMEXITHANDLER hmR0VmxExitMwait;
339static FNVMEXITHANDLER hmR0VmxExitMtf;
340static FNVMEXITHANDLER hmR0VmxExitMonitor;
341static FNVMEXITHANDLER hmR0VmxExitPause;
342static FNVMEXITHANDLER hmR0VmxExitErrMachineCheck;
343static FNVMEXITHANDLER hmR0VmxExitTprBelowThreshold;
344static FNVMEXITHANDLER hmR0VmxExitApicAccess;
345static FNVMEXITHANDLER hmR0VmxExitXdtrAccess;
346static FNVMEXITHANDLER hmR0VmxExitXdtrAccess;
347static FNVMEXITHANDLER hmR0VmxExitEptViolation;
348static FNVMEXITHANDLER hmR0VmxExitEptMisconfig;
349static FNVMEXITHANDLER hmR0VmxExitRdtscp;
350static FNVMEXITHANDLER hmR0VmxExitPreemptTimer;
351static FNVMEXITHANDLER hmR0VmxExitWbinvd;
352static FNVMEXITHANDLER hmR0VmxExitXsetbv;
353static FNVMEXITHANDLER hmR0VmxExitRdrand;
354static FNVMEXITHANDLER hmR0VmxExitInvpcid;
355/** @} */
356
357static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
358static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
359static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
360static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
361static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
362static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
363static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
364static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
365
366/*******************************************************************************
367* Global Variables *
368*******************************************************************************/
369#ifdef HMVMX_USE_FUNCTION_TABLE
370
371/**
372 * VMX_EXIT dispatch table.
373 */
374static const PFNVMEXITHANDLER g_apfnVMExitHandlers[VMX_EXIT_MAX + 1] =
375{
376 /* 00 VMX_EXIT_XCPT_OR_NMI */ hmR0VmxExitXcptOrNmi,
377 /* 01 VMX_EXIT_EXT_INT */ hmR0VmxExitExtInt,
378 /* 02 VMX_EXIT_TRIPLE_FAULT */ hmR0VmxExitTripleFault,
379 /* 03 VMX_EXIT_INIT_SIGNAL */ hmR0VmxExitInitSignal,
380 /* 04 VMX_EXIT_SIPI */ hmR0VmxExitSipi,
381 /* 05 VMX_EXIT_IO_SMI */ hmR0VmxExitIoSmi,
382 /* 06 VMX_EXIT_SMI */ hmR0VmxExitSmi,
383 /* 07 VMX_EXIT_INT_WINDOW */ hmR0VmxExitIntWindow,
384 /* 08 VMX_EXIT_NMI_WINDOW */ hmR0VmxExitNmiWindow,
385 /* 09 VMX_EXIT_TASK_SWITCH */ hmR0VmxExitTaskSwitch,
386 /* 10 VMX_EXIT_CPUID */ hmR0VmxExitCpuid,
387 /* 11 VMX_EXIT_GETSEC */ hmR0VmxExitGetsec,
388 /* 12 VMX_EXIT_HLT */ hmR0VmxExitHlt,
389 /* 13 VMX_EXIT_INVD */ hmR0VmxExitInvd,
390 /* 14 VMX_EXIT_INVLPG */ hmR0VmxExitInvlpg,
391 /* 15 VMX_EXIT_RDPMC */ hmR0VmxExitRdpmc,
392 /* 16 VMX_EXIT_RDTSC */ hmR0VmxExitRdtsc,
393 /* 17 VMX_EXIT_RSM */ hmR0VmxExitRsm,
394 /* 18 VMX_EXIT_VMCALL */ hmR0VmxExitSetPendingXcptUD,
395 /* 19 VMX_EXIT_VMCLEAR */ hmR0VmxExitSetPendingXcptUD,
396 /* 20 VMX_EXIT_VMLAUNCH */ hmR0VmxExitSetPendingXcptUD,
397 /* 21 VMX_EXIT_VMPTRLD */ hmR0VmxExitSetPendingXcptUD,
398 /* 22 VMX_EXIT_VMPTRST */ hmR0VmxExitSetPendingXcptUD,
399 /* 23 VMX_EXIT_VMREAD */ hmR0VmxExitSetPendingXcptUD,
400 /* 24 VMX_EXIT_VMRESUME */ hmR0VmxExitSetPendingXcptUD,
401 /* 25 VMX_EXIT_VMWRITE */ hmR0VmxExitSetPendingXcptUD,
402 /* 26 VMX_EXIT_VMXOFF */ hmR0VmxExitSetPendingXcptUD,
403 /* 27 VMX_EXIT_VMXON */ hmR0VmxExitSetPendingXcptUD,
404 /* 28 VMX_EXIT_MOV_CRX */ hmR0VmxExitMovCRx,
405 /* 29 VMX_EXIT_MOV_DRX */ hmR0VmxExitMovDRx,
406 /* 30 VMX_EXIT_IO_INSTR */ hmR0VmxExitIoInstr,
407 /* 31 VMX_EXIT_RDMSR */ hmR0VmxExitRdmsr,
408 /* 32 VMX_EXIT_WRMSR */ hmR0VmxExitWrmsr,
409 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ hmR0VmxExitErrInvalidGuestState,
410 /* 34 VMX_EXIT_ERR_MSR_LOAD */ hmR0VmxExitErrMsrLoad,
411 /* 35 UNDEFINED */ hmR0VmxExitErrUndefined,
412 /* 36 VMX_EXIT_MWAIT */ hmR0VmxExitMwait,
413 /* 37 VMX_EXIT_MTF */ hmR0VmxExitMtf,
414 /* 38 UNDEFINED */ hmR0VmxExitErrUndefined,
415 /* 39 VMX_EXIT_MONITOR */ hmR0VmxExitMonitor,
416 /* 40 VMX_EXIT_PAUSE */ hmR0VmxExitPause,
417 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ hmR0VmxExitErrMachineCheck,
418 /* 42 UNDEFINED */ hmR0VmxExitErrUndefined,
419 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ hmR0VmxExitTprBelowThreshold,
420 /* 44 VMX_EXIT_APIC_ACCESS */ hmR0VmxExitApicAccess,
421 /* 45 UNDEFINED */ hmR0VmxExitErrUndefined,
422 /* 46 VMX_EXIT_XDTR_ACCESS */ hmR0VmxExitXdtrAccess,
423 /* 47 VMX_EXIT_TR_ACCESS */ hmR0VmxExitXdtrAccess,
424 /* 48 VMX_EXIT_EPT_VIOLATION */ hmR0VmxExitEptViolation,
425 /* 49 VMX_EXIT_EPT_MISCONFIG */ hmR0VmxExitEptMisconfig,
426 /* 50 VMX_EXIT_INVEPT */ hmR0VmxExitSetPendingXcptUD,
427 /* 51 VMX_EXIT_RDTSCP */ hmR0VmxExitRdtscp,
428 /* 52 VMX_EXIT_PREEMPT_TIMER */ hmR0VmxExitPreemptTimer,
429 /* 53 VMX_EXIT_INVVPID */ hmR0VmxExitSetPendingXcptUD,
430 /* 54 VMX_EXIT_WBINVD */ hmR0VmxExitWbinvd,
431 /* 55 VMX_EXIT_XSETBV */ hmR0VmxExitXsetbv,
432 /* 56 UNDEFINED */ hmR0VmxExitErrUndefined,
433 /* 57 VMX_EXIT_RDRAND */ hmR0VmxExitRdrand,
434 /* 58 VMX_EXIT_INVPCID */ hmR0VmxExitInvpcid,
435 /* 59 VMX_EXIT_VMFUNC */ hmR0VmxExitSetPendingXcptUD
436};
437#endif /* HMVMX_USE_FUNCTION_TABLE */
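/* Illustrative sketch (an assumption, not verbatim from this file): with
 * HMVMX_USE_FUNCTION_TABLE defined, the basic exit reason is used to index this
 * table directly when handling a VM-exit, along the lines of:
 *
 *     AssertMsg(pVmxTransient->uExitReason <= VMX_EXIT_MAX, ("%#x\n", pVmxTransient->uExitReason));
 *     int rc = g_apfnVMExitHandlers[pVmxTransient->uExitReason](pVCpu, pMixedCtx, pVmxTransient);
 */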
438
439#ifdef VBOX_STRICT
440static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
441{
442 /* 0 */ "(Not Used)",
443 /* 1 */ "VMCALL executed in VMX root operation.",
444 /* 2 */ "VMCLEAR with invalid physical address.",
445 /* 3 */ "VMCLEAR with VMXON pointer.",
446 /* 4 */ "VMLAUNCH with non-clear VMCS.",
447 /* 5 */ "VMRESUME with non-launched VMCS.",
448 /* 6 */ "VMRESUME after VMXOFF",
449 /* 7 */ "VM entry with invalid control fields.",
450 /* 8 */ "VM entry with invalid host state fields.",
451 /* 9 */ "VMPTRLD with invalid physical address.",
452 /* 10 */ "VMPTRLD with VMXON pointer.",
453 /* 11 */ "VMPTRLD with incorrect revision identifier.",
454 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
455 /* 13 */ "VMWRITE to read-only VMCS component.",
456 /* 14 */ "(Not Used)",
457 /* 15 */ "VMXON executed in VMX root operation.",
458 /* 16 */ "VM entry with invalid executive-VMCS pointer.",
459 /* 17 */ "VM entry with non-launched executing VMCS.",
460 /* 18 */ "VM entry with executive-VMCS pointer not VMXON pointer.",
461 /* 19 */ "VMCALL with non-clear VMCS.",
462 /* 20 */ "VMCALL with invalid VM-exit control fields.",
463 /* 21 */ "(Not Used)",
464 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
465 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
466 /* 24 */ "VMCALL with invalid SMM-monitor features.",
467 /* 25 */ "VM entry with invalid VM-execution control fields in executive VMCS.",
468 /* 26 */ "VM entry with events blocked by MOV SS.",
469 /* 27 */ "(Not Used)",
470 /* 28 */ "Invalid operand to INVEPT/INVVPID."
471};
472#endif /* VBOX_STRICT */
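/* Illustrative sketch (an assumption, not verbatim from this file): the table above
 * is meant to be indexed with a bounds check against HMVMX_INSTR_ERROR_MAX, e.g.
 * when logging a failed VM-entry:
 *
 *     uint32_t const uError = pVCpu->hm.s.vmx.LastError.u32InstrError;
 *     Log(("VM-instruction error %u: %s\n", uError,
 *          uError <= HMVMX_INSTR_ERROR_MAX ? g_apszVmxInstrErrors[uError] : "Unknown"));
 */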
473
474
475
476/**
477 * Updates the VM's last error record. If there was a VMX instruction error,
478 * reads the error data from the VMCS and updates VCPU's last error record as
479 * well.
480 *
481 * @param pVM Pointer to the VM.
482 * @param pVCpu Pointer to the VMCPU (can be NULL if @a rc is not
483 * VERR_VMX_UNABLE_TO_START_VM or
484 * VERR_VMX_INVALID_VMCS_FIELD).
485 * @param rc The error code.
486 */
487static void hmR0VmxUpdateErrorRecord(PVM pVM, PVMCPU pVCpu, int rc)
488{
489 AssertPtr(pVM);
490 if ( rc == VERR_VMX_INVALID_VMCS_FIELD
491 || rc == VERR_VMX_UNABLE_TO_START_VM)
492 {
493 AssertPtrReturnVoid(pVCpu);
494 VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
495 }
496 pVM->hm.s.lLastError = rc;
497}
498
499
500/**
501 * Reads the VM-entry interruption-information field from the VMCS into the VMX
502 * transient structure.
503 *
504 * @returns VBox status code.
505 * @param pVmxTransient Pointer to the VMX transient structure.
506 *
507 * @remarks No-long-jump zone!!!
508 */
509DECLINLINE(int) hmR0VmxReadEntryIntrInfoVmcs(PVMXTRANSIENT pVmxTransient)
510{
511 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntrInfo);
512 AssertRCReturn(rc, rc);
513 return VINF_SUCCESS;
514}
515
516
517/**
518 * Reads the VM-entry exception error code field from the VMCS into
519 * the VMX transient structure.
520 *
521 * @returns VBox status code.
522 * @param pVmxTransient Pointer to the VMX transient structure.
523 *
524 * @remarks No-long-jump zone!!!
525 */
526DECLINLINE(int) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
527{
528 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
529 AssertRCReturn(rc, rc);
530 return VINF_SUCCESS;
531}
532
533
534/**
535 * Reads the VM-entry instruction length field from the VMCS into
536 * the VMX transient structure.
537 *
538 * @returns VBox status code.
539 * @param pVCpu Pointer to the VMCPU.
540 * @param pVmxTransient Pointer to the VMX transient structure.
541 *
542 * @remarks No-long-jump zone!!!
543 */
544DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
545{
546 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
547 AssertRCReturn(rc, rc);
548 return VINF_SUCCESS;
549}
550
551
552/**
553 * Reads the VM-exit interruption-information field from the VMCS into the VMX
554 * transient structure.
555 *
556 * @returns VBox status code.
557 * @param pVCpu Pointer to the VMCPU.
558 * @param pVmxTransient Pointer to the VMX transient structure.
559 */
560DECLINLINE(int) hmR0VmxReadExitIntrInfoVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
561{
562 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO))
563 {
564 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntrInfo);
565 AssertRCReturn(rc, rc);
566 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO;
567 }
568 return VINF_SUCCESS;
569}
570
571
572/**
573 * Reads the VM-exit interruption error code from the VMCS into the VMX
574 * transient structure.
575 *
576 * @returns VBox status code.
577 * @param pVCpu Pointer to the VMCPU.
578 * @param pVmxTransient Pointer to the VMX transient structure.
579 */
580DECLINLINE(int) hmR0VmxReadExitIntrErrorCodeVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
581{
582 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE))
583 {
584 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntrErrorCode);
585 AssertRCReturn(rc, rc);
586 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE;
587 }
588 return VINF_SUCCESS;
589}
590
591
592/**
593 * Reads the VM-exit instruction length field from the VMCS into the VMX
594 * transient structure.
595 *
596 * @returns VBox status code.
597 * @param pVCpu Pointer to the VMCPU.
598 * @param pVmxTransient Pointer to the VMX transient structure.
599 */
600DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
601{
602 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN))
603 {
604 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
605 AssertRCReturn(rc, rc);
606 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN;
607 }
608 return VINF_SUCCESS;
609}
610
611
612/**
613 * Reads the VM-exit instruction-information field from the VMCS into
614 * the VMX transient structure.
615 *
616 * @returns VBox status code.
617 * @param pVCpu Pointer to the VMCPU.
618 * @param pVmxTransient Pointer to the VMX transient structure.
619 */
620DECLINLINE(int) hmR0VmxReadExitInstrInfoVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
621{
622 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO))
623 {
624 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
625 AssertRCReturn(rc, rc);
626 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO;
627 }
628 return VINF_SUCCESS;
629}
630
631
632/**
633 * Reads the exit qualification from the VMCS into the VMX transient structure.
634 *
635 * @returns VBox status code.
636 * @param pVCpu Pointer to the VMCPU.
637 * @param pVmxTransient Pointer to the VMX transient structure.
638 */
639DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
640{
641 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION))
642 {
643 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification);
644 AssertRCReturn(rc, rc);
645 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION;
646 }
647 return VINF_SUCCESS;
648}
649
650
651/**
652 * Reads the IDT-vectoring information field from the VMCS into the VMX
653 * transient structure.
654 *
655 * @returns VBox status code.
656 * @param pVmxTransient Pointer to the VMX transient structure.
657 *
658 * @remarks No-long-jump zone!!!
659 */
660DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
661{
662 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO))
663 {
664 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_INFO, &pVmxTransient->uIdtVectoringInfo);
665 AssertRCReturn(rc, rc);
666 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO;
667 }
668 return VINF_SUCCESS;
669}
670
671
672/**
673 * Reads the IDT-vectoring error code from the VMCS into the VMX
674 * transient structure.
675 *
676 * @returns VBox status code.
677 * @param pVmxTransient Pointer to the VMX transient structure.
678 */
679DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
680{
681 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE))
682 {
683 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
684 AssertRCReturn(rc, rc);
685 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE;
686 }
687 return VINF_SUCCESS;
688}
689
690
691/**
692 * Enters VMX root mode operation on the current CPU.
693 *
694 * @returns VBox status code.
695 * @param pVM Pointer to the VM (optional, can be NULL, after
696 * a resume).
697 * @param HCPhysCpuPage Physical address of the VMXON region.
698 * @param pvCpuPage Pointer to the VMXON region.
699 */
700static int hmR0VmxEnterRootMode(PVM pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
701{
702 AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
703 AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
704 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
705
706 if (pVM)
707 {
708 /* Write the VMCS revision dword to the VMXON region. */
709 *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
710 }
711
712 /* Enable the VMX bit in CR4 if necessary. */
713 RTCCUINTREG uCr4 = ASMGetCR4();
714 if (!(uCr4 & X86_CR4_VMXE))
715 ASMSetCR4(uCr4 | X86_CR4_VMXE);
716
717 /* Enter VMX root mode. */
718 int rc = VMXEnable(HCPhysCpuPage);
719 if (RT_FAILURE(rc))
720 ASMSetCR4(uCr4);
721
722 return rc;
723}
724
725
726/**
727 * Exits VMX root mode operation on the current CPU.
728 *
729 * @returns VBox status code.
730 */
731static int hmR0VmxLeaveRootMode(void)
732{
733 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
734
735 /* If we're for some reason not in VMX root mode, then don't leave it. */
736 RTCCUINTREG uHostCR4 = ASMGetCR4();
737 if (uHostCR4 & X86_CR4_VMXE)
738 {
739 /* Exit VMX root mode and clear the VMX bit in CR4. */
740 VMXDisable();
741 ASMSetCR4(uHostCR4 & ~X86_CR4_VMXE);
742 return VINF_SUCCESS;
743 }
744
745 return VERR_VMX_NOT_IN_VMX_ROOT_MODE;
746}
747
748
749/**
750 * Allocates and maps one physically contiguous page. The allocated page is
751 * zero'd out. (Used by various VT-x structures).
752 *
753 * @returns IPRT status code.
754 * @param pMemObj Pointer to the ring-0 memory object.
755 * @param ppVirt Where to store the virtual address of the
756 * allocation.
757 * @param pPhys Where to store the physical address of the
758 * allocation.
759 */
760DECLINLINE(int) hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
761{
762 AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER);
763 AssertPtrReturn(ppVirt, VERR_INVALID_PARAMETER);
764 AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
765
766 int rc = RTR0MemObjAllocCont(pMemObj, PAGE_SIZE, false /* fExecutable */);
767 if (RT_FAILURE(rc))
768 return rc;
769 *ppVirt = RTR0MemObjAddress(*pMemObj);
770 *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
771 ASMMemZero32(*ppVirt, PAGE_SIZE);
772 return VINF_SUCCESS;
773}
774
775
776/**
777 * Frees and unmaps an allocated physical page.
778 *
779 * @param pMemObj Pointer to the ring-0 memory object.
780 * @param ppVirt Where to re-initialize the virtual address of
781 * allocation as 0.
782 * @param pHCPhys Where to re-initialize the physical address of the
783 * allocation as 0.
784 */
785DECLINLINE(void) hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
786{
787 AssertPtr(pMemObj);
788 AssertPtr(ppVirt);
789 AssertPtr(pHCPhys);
790 if (*pMemObj != NIL_RTR0MEMOBJ)
791 {
792 int rc = RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
793 AssertRC(rc);
794 *pMemObj = NIL_RTR0MEMOBJ;
795 *ppVirt = 0;
796 *pHCPhys = 0;
797 }
798}
799
800
801/**
802 * Worker function to free VT-x related structures.
803 *
804 * @returns IPRT status code.
805 * @param pVM Pointer to the VM.
806 */
807static void hmR0VmxStructsFree(PVM pVM)
808{
809 for (VMCPUID i = 0; i < pVM->cCpus; i++)
810 {
811 PVMCPU pVCpu = &pVM->aCpus[i];
812 AssertPtr(pVCpu);
813
814#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
815 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
816 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
817#endif
818
819 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
820 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
821
822 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic, &pVCpu->hm.s.vmx.HCPhysVirtApic);
823 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
824 }
825
826 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
827#ifdef VBOX_WITH_CRASHDUMP_MAGIC
828 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
829#endif
830}
831
832
833/**
834 * Worker function to allocate VT-x related VM structures.
835 *
836 * @returns IPRT status code.
837 * @param pVM Pointer to the VM.
838 */
839static int hmR0VmxStructsAlloc(PVM pVM)
840{
841 /*
842 * Initialize members up-front so we can cleanup properly on allocation failure.
843 */
844#define VMXLOCAL_INIT_VM_MEMOBJ(a_Name, a_VirtPrefix) \
845 pVM->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
846 pVM->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
847 pVM->hm.s.vmx.HCPhys##a_Name = 0;
848
849#define VMXLOCAL_INIT_VMCPU_MEMOBJ(a_Name, a_VirtPrefix) \
850 pVCpu->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
851 pVCpu->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
852 pVCpu->hm.s.vmx.HCPhys##a_Name = 0;
853
854#ifdef VBOX_WITH_CRASHDUMP_MAGIC
855 VMXLOCAL_INIT_VM_MEMOBJ(Scratch, pv);
856#endif
857 VMXLOCAL_INIT_VM_MEMOBJ(ApicAccess, pb);
858
859 AssertCompile(sizeof(VMCPUID) == sizeof(pVM->cCpus));
860 for (VMCPUID i = 0; i < pVM->cCpus; i++)
861 {
862 PVMCPU pVCpu = &pVM->aCpus[i];
863 VMXLOCAL_INIT_VMCPU_MEMOBJ(Vmcs, pv);
864 VMXLOCAL_INIT_VMCPU_MEMOBJ(VirtApic, pb);
865 VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
866#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
867 VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
868 VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
869#endif
870 }
871#undef VMXLOCAL_INIT_VMCPU_MEMOBJ
872#undef VMXLOCAL_INIT_VM_MEMOBJ
873
874 /*
875 * Allocate all the VT-x structures.
876 */
877 int rc = VINF_SUCCESS;
878#ifdef VBOX_WITH_CRASHDUMP_MAGIC
879 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
880 if (RT_FAILURE(rc))
881 goto cleanup;
882 strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
883 *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xdeadbeefdeadbeef);
884#endif
885
886 /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
887 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
888 {
889 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
890 &pVM->hm.s.vmx.HCPhysApicAccess);
891 if (RT_FAILURE(rc))
892 goto cleanup;
893 }
894
895 /*
896 * Initialize per-VCPU VT-x structures.
897 */
898 for (VMCPUID i = 0; i < pVM->cCpus; i++)
899 {
900 PVMCPU pVCpu = &pVM->aCpus[i];
901 AssertPtr(pVCpu);
902
903 /* Allocate the VM control structure (VMCS). */
904 AssertReturn(MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.msr.vmx_basic_info) <= PAGE_SIZE, VERR_INTERNAL_ERROR);
905 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
906 if (RT_FAILURE(rc))
907 goto cleanup;
908
909 /* Allocate the Virtual-APIC page for transparent TPR accesses. */
910 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
911 {
912 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
913 &pVCpu->hm.s.vmx.HCPhysVirtApic);
914 if (RT_FAILURE(rc))
915 goto cleanup;
916 }
917
918 /* Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for transparent accesses of specific MSRs. */
919 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
920 {
921 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
922 &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
923 if (RT_FAILURE(rc))
924 goto cleanup;
925 memset(pVCpu->hm.s.vmx.pvMsrBitmap, 0xff, PAGE_SIZE);
926 }
927
928#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
929 /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
930 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
931 if (RT_FAILURE(rc))
932 goto cleanup;
933
934 /* Allocate the VM-exit MSR-load page for the host MSRs. */
935 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
936 if (RT_FAILURE(rc))
937 goto cleanup;
938#endif
939 }
940
941 return VINF_SUCCESS;
942
943cleanup:
944 hmR0VmxStructsFree(pVM);
945 return rc;
946}
947
948
949/**
950 * Does global VT-x initialization (called during module initialization).
951 *
952 * @returns VBox status code.
953 */
954VMMR0DECL(int) VMXR0GlobalInit(void)
955{
956#ifdef HMVMX_USE_FUNCTION_TABLE
957 AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers));
958# ifdef VBOX_STRICT
959 for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++)
960 Assert(g_apfnVMExitHandlers[i]);
961# endif
962#endif
963 return VINF_SUCCESS;
964}
965
966
967/**
968 * Does global VT-x termination (called during module termination).
969 */
970VMMR0DECL(void) VMXR0GlobalTerm()
971{
972 /* Nothing to do currently. */
973}
974
975
976/**
977 * Sets up and activates VT-x on the current CPU.
978 *
979 * @returns VBox status code.
980 * @param pCpu Pointer to the global CPU info struct.
981 * @param pVM Pointer to the VM (can be NULL after a host resume
982 * operation).
983 * @param pvCpuPage Pointer to the VMXON region (can be NULL if @a
984 * fEnabledByHost is true).
985 * @param HCPhysCpuPage Physical address of the VMXON region (can be 0 if
986 * @a fEnabledByHost is true).
987 * @param fEnabledByHost Set if SUPR0EnableVTx() or similar was used to
988 * enable VT-x on the host.
989 */
990VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost)
991{
992 AssertReturn(pCpu, VERR_INVALID_PARAMETER);
993 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
994
995 if (!fEnabledByHost)
996 {
997 int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
998 if (RT_FAILURE(rc))
999 return rc;
1000 }
1001
1002 /*
1003 * Flush all EPTP tagged-TLB entries (in case any other hypervisor has been using EPTPs) so that
1004 * we can avoid an explicit flush while using new VPIDs. We would still need to flush
1005 * each time while reusing a VPID after hitting the MaxASID limit once.
1006 */
1007 if ( pVM
1008 && pVM->hm.s.fNestedPaging)
1009 {
1010 /* We require ALL_CONTEXT flush-type to be available on the CPU. See hmR0VmxSetupTaggedTlb(). */
1011 Assert(pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS);
1012 hmR0VmxFlushEpt(pVM, NULL /* pVCpu */, VMX_FLUSH_EPT_ALL_CONTEXTS);
1013 pCpu->fFlushAsidBeforeUse = false;
1014 }
1015 else
1016 {
1017 /** @todo This is still not perfect. If on host resume (pVM is NULL or a VM
1018 * without Nested Paging triggered this function) we still have the risk
1019 * of potentially running with stale TLB-entries from other hypervisors
1020 * when later we use a VM with NestedPaging. To fix this properly we will
1021 * have to pass '&g_HvmR0' (see HMR0.cpp) to this function and read
1022 * 'vmx_ept_vpid_caps' from it. Sigh. */
1023 pCpu->fFlushAsidBeforeUse = true;
1024 }
1025
1026 /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
1027 ++pCpu->cTlbFlushes;
1028
1029 return VINF_SUCCESS;
1030}
1031
1032
1033/**
1034 * Deactivates VT-x on the current CPU.
1035 *
1036 * @returns VBox status code.
1037 * @param pCpu Pointer to the global CPU info struct.
1038 * @param pvCpuPage Pointer to the VMXON region.
1039 * @param HCPhysCpuPage Physical address of the VMXON region.
1040 *
1041 * @remarks This function should never be called when SUPR0EnableVTx() or
1042 * similar was used to enable VT-x on the host.
1043 */
1044VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
1045{
1046 NOREF(pCpu);
1047 NOREF(pvCpuPage);
1048 NOREF(HCPhysCpuPage);
1049
1050 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1051 return hmR0VmxLeaveRootMode();
1052}
1053
1054
1055/**
1056 * Sets the permission bits for the specified MSR in the MSR bitmap.
1057 *
1058 * @param pVCpu Pointer to the VMCPU.
1059 * @param uMSR The MSR value.
1060 * @param enmRead Whether reading this MSR causes a VM-exit.
1061 * @param enmWrite Whether writing this MSR causes a VM-exit.
1062 */
1063static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
1064{
1065 int32_t iBit;
1066 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
1067
1068 /*
1069 * Layout:
1070 * 0x000 - 0x3ff - Low MSR read bits
1071 * 0x400 - 0x7ff - High MSR read bits
1072 * 0x800 - 0xbff - Low MSR write bits
1073 * 0xc00 - 0xfff - High MSR write bits
1074 */
1075 if (uMsr <= 0x00001FFF)
1076 iBit = uMsr;
1077 else if ( uMsr >= 0xC0000000
1078 && uMsr <= 0xC0001FFF)
1079 {
1080 iBit = (uMsr - 0xC0000000);
1081 pbMsrBitmap += 0x400;
1082 }
1083 else
1084 {
1085 AssertMsgFailed(("hmR0VmxSetMsrPermission: Invalid MSR %#RX32\n", uMsr));
1086 return;
1087 }
1088
1089 Assert(iBit <= 0x1fff);
1090 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
1091 ASMBitSet(pbMsrBitmap, iBit);
1092 else
1093 ASMBitClear(pbMsrBitmap, iBit);
1094
1095 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
1096 ASMBitSet(pbMsrBitmap + 0x800, iBit);
1097 else
1098 ASMBitClear(pbMsrBitmap + 0x800, iBit);
1099}
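/* Illustrative usage sketch (an assumption, not verbatim from this section): allow the
 * guest to access one MSR without VM-exits while still intercepting writes to another:
 *
 *     hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
 *     hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR,         VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_INTERCEPT_WRITE);
 */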
1100
1101
1102/**
1103 * Flushes the TLB using EPT.
1104 *
1105 * @returns VBox status code.
1106 * @param pVM Pointer to the VM.
1107 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
1108 * enmFlush).
1109 * @param enmFlush Type of flush.
1110 */
1111static void hmR0VmxFlushEpt(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush)
1112{
1113 AssertPtr(pVM);
1114 Assert(pVM->hm.s.fNestedPaging);
1115
1116 uint64_t descriptor[2];
1117 if (enmFlush == VMX_FLUSH_EPT_ALL_CONTEXTS)
1118 descriptor[0] = 0;
1119 else
1120 {
1121 Assert(pVCpu);
1122 descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP;
1123 }
1124 descriptor[1] = 0; /* MBZ. Intel spec. 33.3 "VMX Instructions" */
1125
1126 int rc = VMXR0InvEPT(enmFlush, &descriptor[0]);
1127 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmFlush, pVCpu ? pVCpu->hm.s.vmx.HCPhysEPTP : 0,
1128 rc));
1129 if ( RT_SUCCESS(rc)
1130 && pVCpu)
1131 {
1132 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
1133 }
1134}
1135
1136
1137/**
1138 * Flushes the TLB using VPID.
1139 *
1140 * @returns VBox status code.
1141 * @param pVM Pointer to the VM.
1142 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
1143 * enmFlush).
1144 * @param enmFlush Type of flush.
1145 * @param GCPtr Virtual address of the page to flush (can be 0 depending
1146 * on @a enmFlush).
1147 */
1148static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr)
1149{
1150 AssertPtr(pVM);
1151 Assert(pVM->hm.s.vmx.fVpid);
1152
1153 uint64_t descriptor[2];
1154 if (enmFlush == VMX_FLUSH_VPID_ALL_CONTEXTS)
1155 {
1156 descriptor[0] = 0;
1157 descriptor[1] = 0;
1158 }
1159 else
1160 {
1161 AssertPtr(pVCpu);
1162 AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1163 AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1164 descriptor[0] = pVCpu->hm.s.uCurrentAsid;
1165 descriptor[1] = GCPtr;
1166 }
1167
1168 int rc = VMXR0InvVPID(enmFlush, &descriptor[0]); NOREF(rc);
1169 AssertMsg(rc == VINF_SUCCESS,
1170 ("VMXR0InvVPID %#x %u %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
1171 if ( RT_SUCCESS(rc)
1172 && pVCpu)
1173 {
1174 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
1175 }
1176}
1177
1178
1179/**
1180 * Invalidates a guest page by guest virtual address. Only relevant for
1181 * EPT/VPID, otherwise there is nothing really to invalidate.
1182 *
1183 * @returns VBox status code.
1184 * @param pVM Pointer to the VM.
1185 * @param pVCpu Pointer to the VMCPU.
1186 * @param GCVirt Guest virtual address of the page to invalidate.
1187 */
1188VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
1189{
1190 AssertPtr(pVM);
1191 AssertPtr(pVCpu);
1192 LogFlowFunc(("pVM=%p pVCpu=%p GCVirt=%RGv\n", pVM, pVCpu, GCVirt));
1193
1194 bool fFlushPending = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1195 if (!fFlushPending)
1196 {
1197 /*
1198 * We must invalidate the guest TLB entry in either case; we cannot ignore it even for the EPT case.
1199 * See @bugref{6043} and @bugref{6177}.
1200 *
1201 * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*() as this
1202 * function may be called in a loop with individual addresses.
1203 */
1204 if (pVM->hm.s.vmx.fVpid)
1205 {
1206 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1207 {
1208 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, GCVirt);
1209 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
1210 }
1211 else
1212 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1213 }
1214 else if (pVM->hm.s.fNestedPaging)
1215 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1216 }
1217
1218 return VINF_SUCCESS;
1219}
1220
1221
1222/**
1223 * Invalidates a guest page by physical address. Only relevant for EPT/VPID,
1224 * otherwise there is nothing really to invalidate.
1225 *
1226 * @returns VBox status code.
1227 * @param pVM Pointer to the VM.
1228 * @param pVCpu Pointer to the VMCPU.
1229 * @param GCPhys Guest physical address of the page to invalidate.
1230 */
1231VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
1232{
1233 LogFlowFunc(("%RGp\n", GCPhys));
1234
1235 /*
1236 * We cannot flush a page by guest-physical address. invvpid takes only a linear address while invept only flushes
1237 * by EPT not individual addresses. We update the force flag here and flush before the next VM-entry in hmR0VmxFlushTLB*().
1238 * This function might be called in a loop. This should cause a flush-by-EPT if EPT is in use. See @bugref{6568}.
1239 */
1240 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1241 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys);
1242 return VINF_SUCCESS;
1243}
1244
1245
1246/**
1247 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
1248 * case where neither EPT nor VPID is supported by the CPU.
1249 *
1250 * @param pVM Pointer to the VM.
1251 * @param pVCpu Pointer to the VMCPU.
1252 *
1253 * @remarks Called with interrupts disabled.
1254 */
1255static void hmR0VmxFlushTaggedTlbNone(PVM pVM, PVMCPU pVCpu)
1256{
1257 NOREF(pVM);
1258 AssertPtr(pVCpu);
1259 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
1260 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1261
1262 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1263 AssertPtr(pCpu);
1264
1265 pVCpu->hm.s.TlbShootdown.cPages = 0;
1266 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1267 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1268 pVCpu->hm.s.fForceTLBFlush = false;
1269 return;
1270}
1271
1272
1273/**
1274 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
1275 *
1276 * @param pVM Pointer to the VM.
1277 * @param pVCpu Pointer to the VMCPU.
1278 * @remarks All references to "ASID" in this function pertain to "VPID" in
1279 * Intel's nomenclature. The reason is, to avoid confusion in compare
1280 * statements since the host-CPU copies are named "ASID".
1281 *
1282 * @remarks Called with interrupts disabled.
1283 */
1284static void hmR0VmxFlushTaggedTlbBoth(PVM pVM, PVMCPU pVCpu)
1285{
1286#ifdef VBOX_WITH_STATISTICS
1287 bool fTlbFlushed = false;
1288# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { fTlbFlushed = true; } while (0)
1289# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { \
1290 if (!fTlbFlushed) \
1291 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch); \
1292 } while (0)
1293#else
1294# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { } while (0)
1295# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { } while (0)
1296#endif
1297
1298 AssertPtr(pVM);
1299 AssertPtr(pVCpu);
1300 AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
1301 ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
1302 "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hm.s.fNestedPaging, pVM->hm.s.vmx.fVpid));
1303
1304 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1305 AssertPtr(pCpu);
1306
1307 /*
1308 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
1309 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
1310 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
1311 */
1312 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1313 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1314 {
1315 ++pCpu->uCurrentAsid;
1316 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1317 {
1318 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0. */
1319 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
1320 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
1321 }
1322
1323 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1324 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1325 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1326
1327 /*
1328 * Flush by EPT when we get rescheduled to a new host CPU to ensure EPT-only tagged mappings are also
1329 * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}.
1330 */
1331 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1332 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1333 HMVMX_SET_TAGGED_TLB_FLUSHED();
1334 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH); /* Already flushed-by-EPT, skip doing it again below. */
1335 }
1336
1337 /* Check for explicit TLB shootdowns. */
1338 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1339 {
1340 /*
1341 * Changes to the EPT paging structure by VMM requires flushing by EPT as the CPU creates
1342 * guest-physical (only EPT-tagged) mappings while traversing the EPT tables when EPT is in use.
1343 * Flushing by VPID will only flush linear (only VPID-tagged) and combined (EPT+VPID tagged) mappings
1344 * but not guest-physical mappings.
1345 * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information". See @bugref{6568}.
1346 */
1347 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1348 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1349 HMVMX_SET_TAGGED_TLB_FLUSHED();
1350 }
1351
1352 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1353 * not be executed. See hmQueueInvlPage() where it is commented
1354 * out. Support individual entry flushing someday. */
1355 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1356 {
1357 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
1358
1359 /*
1360 * Flush individual guest entries using VPID from the TLB or as little as possible with EPT
1361 * as supported by the CPU.
1362 */
1363 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1364 {
1365 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
1366 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
1367 }
1368 else
1369 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1370
1371 HMVMX_SET_TAGGED_TLB_FLUSHED();
1372 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1373 }
1374
1375 pVCpu->hm.s.TlbShootdown.cPages = 0;
1376 pVCpu->hm.s.fForceTLBFlush = false;
1377
1378 HMVMX_UPDATE_FLUSH_SKIPPED_STAT();
1379
1380 Assert(pVCpu->hm.s.idLastCpu == pCpu->idCpu);
1381 Assert(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes);
1382 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
1383 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
1384 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
1385 ("cpu%d uCurrentAsid = %u\n", pCpu->idCpu, pCpu->uCurrentAsid));
1386 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
1387 ("cpu%d VM uCurrentAsid = %u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
1388
1389 /* Update VMCS with the VPID. */
1390 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
1391 AssertRC(rc);
1392
1393#undef HMVMX_SET_TAGGED_TLB_FLUSHED
1394}
1395
1396
1397/**
1398 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
1399 *
1400 * @returns VBox status code.
1401 * @param pVM Pointer to the VM.
1402 * @param pVCpu Pointer to the VMCPU.
1403 *
1404 * @remarks Called with interrupts disabled.
1405 */
1406static void hmR0VmxFlushTaggedTlbEpt(PVM pVM, PVMCPU pVCpu)
1407{
1408 AssertPtr(pVM);
1409 AssertPtr(pVCpu);
1410 AssertMsg(pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with NestedPaging disabled."));
1411 AssertMsg(!pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID enabled."));
1412
1413 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1414 AssertPtr(pCpu);
1415
1416 /*
1417 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
1418 * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
1419 */
1420 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1421 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1422 {
1423 pVCpu->hm.s.fForceTLBFlush = true;
1424 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1425 }
1426
1427 /* Check for explicit TLB shootdown flushes. */
1428 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1429 {
1430 pVCpu->hm.s.fForceTLBFlush = true;
1431 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1432 }
1433
1434 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1435 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1436
1437 if (pVCpu->hm.s.fForceTLBFlush)
1438 {
1439 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1440 pVCpu->hm.s.fForceTLBFlush = false;
1441 }
1442 else
1443 {
1444 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1445 * not be executed. See hmQueueInvlPage() where it is commented
1446 * out. Support individual entry flushing someday. */
1447 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1448 {
1449 /* We cannot flush individual entries without VPID support. Flush using EPT. */
1450 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
1451 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1452 }
1453 else
1454 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1455 }
1456
1457 pVCpu->hm.s.TlbShootdown.cPages = 0;
1458 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1459}
1460
1461
1462/**
1463 * Flushes the tagged-TLB entries for VPID CPUs as necessary.
1464 *
1465 * @returns VBox status code.
1466 * @param pVM Pointer to the VM.
1467 * @param pVCpu Pointer to the VMCPU.
1468 *
1469 * @remarks Called with interrupts disabled.
1470 */
1471static void hmR0VmxFlushTaggedTlbVpid(PVM pVM, PVMCPU pVCpu)
1472{
1473 AssertPtr(pVM);
1474 AssertPtr(pVCpu);
1475 AssertMsg(pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTlbVpid cannot be invoked with VPID disabled."));
1476 AssertMsg(!pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTlbVpid cannot be invoked with NestedPaging enabled"));
1477
1478 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1479
1480 /*
1481 * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
1482 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
1483 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
1484 */
1485 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1486 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1487 {
1488 pVCpu->hm.s.fForceTLBFlush = true;
1489 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1490 }
1491
1492 /* Check for explicit TLB shootdown flushes. */
1493 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1494 {
1495 /*
1496 * If we ever support VPID flush combinations other than ALL or SINGLE-context (see hmR0VmxSetupTaggedTlb())
1497 * we would need to explicitly flush in this case (add an fExplicitFlush = true here and change the
1498 * pCpu->fFlushAsidBeforeUse check below to include fExplicitFlush's too) - an obscure corner case.
1499 */
1500 pVCpu->hm.s.fForceTLBFlush = true;
1501 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1502 }
1503
1504 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1505 if (pVCpu->hm.s.fForceTLBFlush)
1506 {
1507 ++pCpu->uCurrentAsid;
1508 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1509 {
1510 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0 */
1511 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
1512 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
1513 }
1514
1515 pVCpu->hm.s.fForceTLBFlush = false;
1516 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1517 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1518 if (pCpu->fFlushAsidBeforeUse)
1519 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
1520 }
1521 else
1522 {
1523 AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
1524 ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
1525 pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
1526 pCpu->uCurrentAsid, pCpu->cTlbFlushes));
1527
1528 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1529 * not be executed. See hmQueueInvlPage() where it is commented
1530 * out. Support individual entry flushing someday. */
1531 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1532 {
1533 /* Flush individual guest entries using VPID or as little as possible with EPT as supported by the CPU. */
1534 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1535 {
1536 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
1537 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
1538 }
1539 else
1540 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
1541 }
1542 else
1543 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1544 }
1545
1546 pVCpu->hm.s.TlbShootdown.cPages = 0;
1547 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1548
1549 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
1550 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
1551 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
1552 ("cpu%d uCurrentAsid = %u\n", pCpu->idCpu, pCpu->uCurrentAsid));
1553 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
1554 ("cpu%d VM uCurrentAsid = %u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
1555
1556 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
1557 AssertRC(rc);
1558}
1559
1560
1561/**
1562 * Flushes the guest TLB entries based on CPU capabilities.
1563 *
1564 * @param pVCpu Pointer to the VMCPU.
1565 */
1566DECLINLINE(void) hmR0VmxFlushTaggedTlb(PVMCPU pVCpu)
1567{
1568 PVM pVM = pVCpu->CTX_SUFF(pVM);
1569 switch (pVM->hm.s.vmx.uFlushTaggedTlb)
1570 {
1571 case HMVMX_FLUSH_TAGGED_TLB_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVM, pVCpu); break;
1572 case HMVMX_FLUSH_TAGGED_TLB_EPT: hmR0VmxFlushTaggedTlbEpt(pVM, pVCpu); break;
1573 case HMVMX_FLUSH_TAGGED_TLB_VPID: hmR0VmxFlushTaggedTlbVpid(pVM, pVCpu); break;
1574 case HMVMX_FLUSH_TAGGED_TLB_NONE: hmR0VmxFlushTaggedTlbNone(pVM, pVCpu); break;
1575 default:
1576 AssertMsgFailed(("Invalid flush-tag function identifier\n"));
1577 break;
1578 }
1579}
1580
1581
1582/**
1583 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
1584 * TLB entries from the host TLB before VM-entry.
1585 *
1586 * @returns VBox status code.
1587 * @param pVM Pointer to the VM.
1588 */
1589static int hmR0VmxSetupTaggedTlb(PVM pVM)
1590{
1591 /*
1592 * Determine optimal flush type for Nested Paging.
1593 * We cannot ignore EPT if no suitable flush type is supported by the CPU, as we've already set up unrestricted
1594 * guest execution (see hmR3InitFinalizeR0()).
1595 */
1596 if (pVM->hm.s.fNestedPaging)
1597 {
1598 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
1599 {
1600 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
1601 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_SINGLE_CONTEXT;
1602 else if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
1603 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_ALL_CONTEXTS;
1604 else
1605 {
1606 /* Shouldn't happen. EPT is supported but no suitable flush types are supported. */
1607 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1608 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1609 }
1610
1611 /* Make sure the write-back cacheable memory type for EPT is supported. */
1612 if (!(pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB))
1613 {
1614 LogRel(("hmR0VmxSetupTaggedTlb: Unsupported EPTP memory type %#x.\n", pVM->hm.s.vmx.msr.vmx_ept_vpid_caps));
1615 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1616 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1617 }
1618 }
1619 else
1620 {
1621 /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
1622 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1623 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1624 }
1625 }
1626
1627 /*
1628 * Determine optimal flush type for VPID.
1629 */
1630 if (pVM->hm.s.vmx.fVpid)
1631 {
1632 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
1633 {
1634 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
1635 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_SINGLE_CONTEXT;
1636 else if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
1637 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_ALL_CONTEXTS;
1638 else
1639 {
1640 /* Neither SINGLE nor ALL-context flush types for VPID are supported by the CPU. Ignore the VPID capability. */
1641 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1642 LogRel(("hmR0VmxSetupTaggedTlb: Only INDIV_ADDR supported. Ignoring VPID.\n"));
1643 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
1644 LogRel(("hmR0VmxSetupTaggedTlb: Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
1645 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
1646 pVM->hm.s.vmx.fVpid = false;
1647 }
1648 }
1649 else
1650 {
1651 /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
1652 Log4(("hmR0VmxSetupTaggedTlb: VPID supported without INVVPID support. Ignoring VPID.\n"));
1653 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
1654 pVM->hm.s.vmx.fVpid = false;
1655 }
1656 }
1657
1658 /*
1659 * Setup the handler for flushing tagged-TLBs.
1660 */
1661 if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
1662 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT_VPID;
1663 else if (pVM->hm.s.fNestedPaging)
1664 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT;
1665 else if (pVM->hm.s.vmx.fVpid)
1666 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_VPID;
1667 else
1668 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_NONE;
1669 return VINF_SUCCESS;
1670}
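
/*
 * Summary of the handler selection above (illustrative recap, not part of the original source):
 *
 *      fNestedPaging   fVpid   uFlushTaggedTlb                     Handler
 *      true            true    HMVMX_FLUSH_TAGGED_TLB_EPT_VPID     hmR0VmxFlushTaggedTlbBoth()
 *      true            false   HMVMX_FLUSH_TAGGED_TLB_EPT          hmR0VmxFlushTaggedTlbEpt()
 *      false           true    HMVMX_FLUSH_TAGGED_TLB_VPID         hmR0VmxFlushTaggedTlbVpid()
 *      false           false   HMVMX_FLUSH_TAGGED_TLB_NONE         hmR0VmxFlushTaggedTlbNone()
 */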
1671
1672
1673/**
1674 * Sets up pin-based VM-execution controls in the VMCS.
1675 *
1676 * @returns VBox status code.
1677 * @param pVM Pointer to the VM.
1678 * @param pVCpu Pointer to the VMCPU.
1679 */
1680static int hmR0VmxSetupPinCtls(PVM pVM, PVMCPU pVCpu)
1681{
1682 AssertPtr(pVM);
1683 AssertPtr(pVCpu);
1684
1685 uint32_t val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0; /* Bits set here must always be set. */
1686 uint32_t zap = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1; /* Bits cleared here must always be cleared. */
1687
1688 val |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT /* External interrupts cause a VM-exit. */
1689 | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT; /* Non-maskable interrupts cause a VM-exit. */
1690 Assert(!(val & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI));
1691
1692 /* Enable the VMX preemption timer. */
1693 if (pVM->hm.s.vmx.fUsePreemptTimer)
1694 {
1695 Assert(pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
1696 val |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER;
1697 }
1698
1699 if ((val & zap) != val)
1700 {
1701 LogRel(("hmR0VmxSetupPinCtls: invalid pin-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
1702 pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0, val, zap));
1703 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1704 }
1705
1706 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, val);
1707 AssertRCReturn(rc, rc);
1708
1709 /* Update VCPU with the currently set pin-based VM-execution controls. */
1710 pVCpu->hm.s.vmx.u32PinCtls = val;
1711 return rc;
1712}
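
/*
 * Illustrative sketch (not part of the original source): the allowed-0/allowed-1 validation pattern used
 * above and in the other control-setup routines, factored into a hypothetical helper. 'fDisallowed0' and
 * 'fAllowed1' come from the relevant VMX capability MSR; 'fDesired' is whatever we want to enable.
 */
#if 0
DECLINLINE(bool) hmR0VmxIsCtlsComboSupported(uint32_t fDisallowed0, uint32_t fAllowed1, uint32_t fDesired)
{
    uint32_t const val = fDesired | fDisallowed0;   /* Bits that must be 1 are forced to 1. */
    uint32_t const zap = fAllowed1;                 /* Bits that may be 1; everything else must stay 0. */
    return (val & zap) == val;                      /* Fails if we asked for a bit the CPU cannot set. */
}
#endif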
1713
1714
1715/**
1716 * Sets up processor-based VM-execution controls in the VMCS.
1717 *
1718 * @returns VBox status code.
1719 * @param pVM Pointer to the VM.
1720 * @param pVCpu Pointer to the VMCPU.
1721 */
1722static int hmR0VmxSetupProcCtls(PVM pVM, PVMCPU pVCpu)
1723{
1724 AssertPtr(pVM);
1725 AssertPtr(pVCpu);
1726
1727 int rc = VERR_INTERNAL_ERROR_5;
1728 uint32_t val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0; /* Bits set here must be set in the VMCS. */
1729 uint32_t zap = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1730
1731 val |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT /* HLT causes a VM-exit. */
1732 | VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
1733 | VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
1734 | VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
1735 | VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT /* RDPMC causes a VM-exit. */
1736 | VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT /* MONITOR causes a VM-exit. */
1737 | VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
1738
1739 /* We toggle VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT later; verify that it is not -always- required to be set or cleared. */
1740 if ( !(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT)
1741 || (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))
1742 {
1743 LogRel(("hmR0VmxSetupProcCtls: unsupported VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT combo!"));
1744 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1745 }
1746
1747 /* Without Nested Paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
1748 if (!pVM->hm.s.fNestedPaging)
1749 {
1750 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); /* Paranoia. */
1751 val |= VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT
1752 | VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
1753 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
1754 }
1755
1756 /* Use TPR shadowing if supported by the CPU. */
1757 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
1758 {
1759 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
1760 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */
1761 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
1762 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
1763 AssertRCReturn(rc, rc);
1764
1765 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
1766 /* CR8 writes cause a VM-exit based on the TPR threshold. */
1767 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT));
1768 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT));
1769 }
1770 else
1771 {
1772 val |= VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT /* CR8 reads cause a VM-exit. */
1773 | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */
1774 }
1775
1776 /* Use MSR-bitmaps if supported by the CPU. */
1777 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1778 {
1779 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS;
1780
1781 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1782 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */
1783 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1784 AssertRCReturn(rc, rc);
1785
1786 /*
1787 * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored
1788 * automatically (either as part of the MSR-load/store areas or dedicated fields in the VMCS).
1789 */
1790 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1791 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1792 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1793 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1794 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1795 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1796 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1797 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1798 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1799 }
1800
1801 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
1802 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1803 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
1804
1805 if ((val & zap) != val)
1806 {
1807 LogRel(("hmR0VmxSetupProcCtls: invalid processor-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
1808 pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0, val, zap));
1809 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1810 }
1811
1812 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, val);
1813 AssertRCReturn(rc, rc);
1814
1815 /* Update VCPU with the currently set processor-based VM-execution controls. */
1816 pVCpu->hm.s.vmx.u32ProcCtls = val;
1817
1818 /*
1819 * Secondary processor-based VM-execution controls.
1820 */
1821 if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL))
1822 {
1823 val = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0; /* Bits set here must be set in the VMCS. */
1824 zap = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1825
1826 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
1827 val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT; /* WBINVD causes a VM-exit. */
1828
1829 if (pVM->hm.s.fNestedPaging)
1830 val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT; /* Enable EPT. */
1831 else
1832 {
1833 /*
1834 * Without Nested Paging, INVPCID should cause a VM-exit. Enabling this bit causes the CPU to refer to
1835 * VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT when INVPCID is executed by the guest.
1836 * See Intel spec. 25.4 "Changes to instruction behaviour in VMX non-root operation".
1837 */
1838 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
1839 val |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
1840 }
1841
1842 if (pVM->hm.s.vmx.fVpid)
1843 val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID; /* Enable VPID. */
1844
1845 if (pVM->hm.s.vmx.fUnrestrictedGuest)
1846 val |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST; /* Enable Unrestricted Execution. */
1847
1848 /* Enable Virtual-APIC page accesses if supported by the CPU. This is essentially where the TPR shadow resides. */
1849 /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
1850 * done dynamically. */
1851 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
1852 {
1853 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
1854 Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */
1855 val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC; /* Virtualize APIC accesses. */
1856 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
1857 AssertRCReturn(rc, rc);
1858 }
1859
1860 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
1861 {
1862 val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP; /* Enable RDTSCP support. */
1863 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1864 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1865 }
1866
1867 if ((val & zap) != val)
1868 {
1869 LogRel(("hmR0VmxSetupProcCtls: invalid secondary processor-based VM-execution controls combo! "
1870 "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0, val, zap));
1871 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1872 }
1873
1874 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, val);
1875 AssertRCReturn(rc, rc);
1876
1877 /* Update VCPU with the currently set secondary processor-based VM-execution controls. */
1878 pVCpu->hm.s.vmx.u32ProcCtls2 = val;
1879 }
1880 else if (RT_UNLIKELY(pVM->hm.s.vmx.fUnrestrictedGuest))
1881 {
1882 LogRel(("hmR0VmxSetupProcCtls: Unrestricted Guest enabled but secondary processor-based VM-execution controls are not "
1883 "available\n"));
1884 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1885 }
1886
1887 return VINF_SUCCESS;
1888}
1889
1890
1891/**
1892 * Sets up miscellaneous (everything other than Pin & Processor-based
1893 * VM-execution) control fields in the VMCS.
1894 *
1895 * @returns VBox status code.
1896 * @param pVM Pointer to the VM.
1897 * @param pVCpu Pointer to the VMCPU.
1898 */
1899static int hmR0VmxSetupMiscCtls(PVM pVM, PVMCPU pVCpu)
1900{
1901 AssertPtr(pVM);
1902 AssertPtr(pVCpu);
1903
1904 int rc = VERR_GENERAL_FAILURE;
1905
1906 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
1907#if 0
1908 /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxLoadGuestControlRegs())*/
1909 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0); AssertRCReturn(rc, rc);
1910 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0); AssertRCReturn(rc, rc);
1911
1912 /*
1913 * Set MASK & MATCH to 0. VMX checks whether GuestPFErrCode & MASK == MATCH. If they are equal (in our case they always are)
1914 * and the X86_XCPT_PF bit in the exception bitmap is set, a #PF causes a VM-exit; if the bit is clear, it doesn't.
1915 * We thus use the exception bitmap alone to control #PF intercepts rather than using both mechanisms.
1916 */
1917 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0); AssertRCReturn(rc, rc);
1918 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0); AssertRCReturn(rc, rc);
1919
1920 /** @todo Explore possibility of using IO-bitmaps. */
1921 /* All IO & IOIO instructions cause VM-exits. */
1922 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0); AssertRCReturn(rc, rc);
1923 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0); AssertRCReturn(rc, rc);
1924
1925 /* Initialize the MSR-bitmap area. */
1926 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0); AssertRCReturn(rc, rc);
1927 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0); AssertRCReturn(rc, rc);
1928 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0); AssertRCReturn(rc, rc);
1929#endif
1930
1931#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
1932 /* Setup MSR autoloading/storing. */
1933 Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
1934 Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf)); /* Lower 4 bits MBZ. */
1935 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
1936 AssertRCReturn(rc, rc);
1937 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
1938 AssertRCReturn(rc, rc);
1939
1940 Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
1941 Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf)); /* Lower 4 bits MBZ. */
1942 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
1943 AssertRCReturn(rc, rc);
1944#endif
1945
1946 /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
1947 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, UINT64_C(0xffffffffffffffff));
1948 AssertRCReturn(rc, rc);
1949
1950 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
1951#if 0
1952 /* Setup debug controls */
1953 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0); /** @todo We don't support IA32_DEBUGCTL MSR. Should we? */
1954 AssertRCReturn(rc, rc);
1955 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
1956 AssertRCReturn(rc, rc);
1957#endif
1958
1959 return rc;
1960}
1961
1962
1963/**
1964 * Sets up the initial exception bitmap in the VMCS based on static conditions
1965 * (i.e. conditions that cannot ever change at runtime).
1966 *
1967 * @returns VBox status code.
1968 * @param pVM Pointer to the VM.
1969 * @param pVCpu Pointer to the VMCPU.
1970 */
1971static int hmR0VmxInitXcptBitmap(PVM pVM, PVMCPU pVCpu)
1972{
1973 AssertPtr(pVM);
1974 AssertPtr(pVCpu);
1975
1976 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
1977
1978 uint32_t u32XcptBitmap = 0;
1979
1980 /* Without Nested Paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
1981 if (!pVM->hm.s.fNestedPaging)
1982 u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
1983
1984 pVCpu->hm.s.vmx.u32XcptBitmap = u32XcptBitmap;
1985 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
1986 AssertRCReturn(rc, rc);
1987 return rc;
1988}
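
/*
 * Illustrative sketch (not part of the original source): runtime conditions extend this static bitmap
 * elsewhere; e.g. when the guest clears CR0.NE, the #MF intercept is enabled later in
 * hmR0VmxLoadGuestControlRegs() along the lines of the fragment below.
 */
#if 0
    pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_MF);
    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
#endif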
1989
1990
1991/**
1992 * Sets up the initial guest-state mask. The guest-state mask is consulted
1993 * before reading guest-state fields from the VMCS as VMREADs can be expensive
1994 * for the nested virtualization case (as it would cause a VM-exit).
1995 *
1996 * @param pVCpu Pointer to the VMCPU.
1997 */
1998static int hmR0VmxInitUpdatedGuestStateMask(PVMCPU pVCpu)
1999{
2000 /* Initially the guest-state is up-to-date as there is nothing in the VMCS. */
2001 pVCpu->hm.s.vmx.fUpdatedGuestState = HMVMX_UPDATED_GUEST_ALL;
2002 return VINF_SUCCESS;
2003}
2004
2005
2006/**
2007 * Does per-VM VT-x initialization.
2008 *
2009 * @returns VBox status code.
2010 * @param pVM Pointer to the VM.
2011 */
2012VMMR0DECL(int) VMXR0InitVM(PVM pVM)
2013{
2014 LogFlowFunc(("pVM=%p\n", pVM));
2015
2016 int rc = hmR0VmxStructsAlloc(pVM);
2017 if (RT_FAILURE(rc))
2018 {
2019 LogRel(("VMXR0InitVM: hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
2020 return rc;
2021 }
2022
2023 return VINF_SUCCESS;
2024}
2025
2026
2027/**
2028 * Does per-VM VT-x termination.
2029 *
2030 * @returns VBox status code.
2031 * @param pVM Pointer to the VM.
2032 */
2033VMMR0DECL(int) VMXR0TermVM(PVM pVM)
2034{
2035 LogFlowFunc(("pVM=%p\n", pVM));
2036
2037#ifdef VBOX_WITH_CRASHDUMP_MAGIC
2038 if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ)
2039 ASMMemZero32(pVM->hm.s.vmx.pvScratch, PAGE_SIZE);
2040#endif
2041 hmR0VmxStructsFree(pVM);
2042 return VINF_SUCCESS;
2043}
2044
2045
2046/**
2047 * Sets up the VM for execution under VT-x.
2048 * This function is only called once per-VM during initialization.
2049 *
2050 * @returns VBox status code.
2051 * @param pVM Pointer to the VM.
2052 */
2053VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
2054{
2055 AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
2056 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2057
2058 LogFlowFunc(("pVM=%p\n", pVM));
2059
2060 /*
2061 * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be allocated.
2062 * We no longer support the highly unlikely case of UnrestrictedGuest without pRealModeTSS. See hmR3InitFinalizeR0().
2063 */
2064 /* -XXX- change hmR3InitFinalizeR0Intel() to fail if pRealModeTSS alloc fails. */
2065 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
2066 && ( !pVM->hm.s.vmx.pNonPagingModeEPTPageTable
2067 || !pVM->hm.s.vmx.pRealModeTSS))
2068 {
2069 LogRel(("VMXR0SetupVM: invalid real-on-v86 state.\n"));
2070 return VERR_INTERNAL_ERROR;
2071 }
2072
2073#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2074 /*
2075 * This is for the darwin 32-bit/PAE kernels trying to execute 64-bit guests. We don't bother with
2076 * the 32<->64 switcher in this case. This is a rare, legacy use-case with barely any test coverage.
2077 */
2078 if ( pVM->hm.s.fAllow64BitGuests
2079 && !HMVMX_IS_64BIT_HOST_MODE())
2080 {
2081 LogRel(("VMXR0SetupVM: Unsupported guest and host paging mode combination.\n"));
2082 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
2083 }
2084#endif
2085
2086 /* Initialize these always, see hmR3InitFinalizeR0().*/
2087 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NONE;
2088 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NONE;
2089
2090 /* Setup the tagged-TLB flush handlers. */
2091 int rc = hmR0VmxSetupTaggedTlb(pVM);
2092 if (RT_FAILURE(rc))
2093 {
2094 LogRel(("VMXR0SetupVM: hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
2095 return rc;
2096 }
2097
2098 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2099 {
2100 PVMCPU pVCpu = &pVM->aCpus[i];
2101 AssertPtr(pVCpu);
2102 AssertPtr(pVCpu->hm.s.vmx.pvVmcs);
2103
2104 /* Log the VCPU pointers, useful for debugging SMP VMs. */
2105 Log4(("VMXR0SetupVM: pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
2106
2107 /* Set revision dword at the beginning of the VMCS structure. */
2108 *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
2109
2110 /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */
2111 rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
2112 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVMCS failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2113 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2114
2115 /* Load this VMCS as the current VMCS. */
2116 rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
2117 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVMCS failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2118 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2119
2120 rc = hmR0VmxSetupPinCtls(pVM, pVCpu);
2121 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2122 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2123
2124 rc = hmR0VmxSetupProcCtls(pVM, pVCpu);
2125 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2126 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2127
2128 rc = hmR0VmxSetupMiscCtls(pVM, pVCpu);
2129 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2130 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2131
2132 rc = hmR0VmxInitXcptBitmap(pVM, pVCpu);
2133 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2134 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2135
2136 rc = hmR0VmxInitUpdatedGuestStateMask(pVCpu);
2137 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitUpdatedGuestStateMask failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2138 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2139
2140#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2141 rc = hmR0VmxInitVmcsReadCache(pVM, pVCpu);
2142 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2143 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2144#endif
2145
2146 /* Re-sync the CPU's internal data into our VMCS memory region & reset the launch state to "clear". */
2147 rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
2148 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVMCS(2) failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2149 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2150
2151 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc);
2152 }
2153
2154 return VINF_SUCCESS;
2155}
2156
2157
2158/**
2159 * Saves the host control registers (CR0, CR3, CR4) into the host-state area in
2160 * the VMCS.
2161 *
2162 * @returns VBox status code.
2163 * @param pVM Pointer to the VM.
2164 * @param pVCpu Pointer to the VMCPU.
2165 */
2166DECLINLINE(int) hmR0VmxSaveHostControlRegs(PVM pVM, PVMCPU pVCpu)
2167{
2168 RTCCUINTREG uReg = ASMGetCR0();
2169 int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
2170 AssertRCReturn(rc, rc);
2171
2172#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2173 /* For the darwin 32-bit hybrid kernel, we need the 64-bit CR3 as it uses 64-bit paging. */
2174 if (HMVMX_IS_64BIT_HOST_MODE())
2175 {
2176 uint64_t uRegCR3 = HMR0Get64bitCR3();
2177 rc = VMXWriteVmcs64(VMX_VMCS_HOST_CR3, uRegCR3);
2178 }
2179 else
2180#endif
2181 {
2182 uReg = ASMGetCR3();
2183 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
2184 }
2185 AssertRCReturn(rc, rc);
2186
2187 uReg = ASMGetCR4();
2188 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
2189 AssertRCReturn(rc, rc);
2190 return rc;
2191}
2192
2193
2194/**
2195 * Saves the host segment registers, GDTR, IDTR, and the TR, FS and GS bases into
2196 * the host-state area in the VMCS.
2197 *
2198 * @returns VBox status code.
2199 * @param pVM Pointer to the VM.
2200 * @param pVCpu Pointer to the VMCPU.
2201 */
2202DECLINLINE(int) hmR0VmxSaveHostSegmentRegs(PVM pVM, PVMCPU pVCpu)
2203{
2204 int rc = VERR_INTERNAL_ERROR_5;
2205 RTSEL uSelDS = 0;
2206 RTSEL uSelES = 0;
2207 RTSEL uSelFS = 0;
2208 RTSEL uSelGS = 0;
2209 RTSEL uSelTR = 0;
2210
2211 /*
2212 * Host DS, ES, FS and GS segment registers.
2213 */
2214#if HC_ARCH_BITS == 64
2215 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
2216 uSelDS = ASMGetDS();
2217 uSelES = ASMGetES();
2218 uSelFS = ASMGetFS();
2219 uSelGS = ASMGetGS();
2220#endif
2221
2222 /*
2223 * Host CS and SS segment registers.
2224 */
2225 RTSEL uSelCS;
2226 RTSEL uSelSS;
2227#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2228 if (HMVMX_IS_64BIT_HOST_MODE())
2229 {
2230 uSelCS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelCS;
2231 uSelSS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelSS;
2232 }
2233 else
2234 {
2235 /* Seems darwin uses the LDT (TI flag is set) in the CS & SS selectors which VT-x doesn't like. */
2236 uSelCS = (RTSEL)(uintptr_t)&SUPR0AbsKernelCS;
2237 uSelSS = (RTSEL)(uintptr_t)&SUPR0AbsKernelSS;
2238 }
2239#else
2240 uSelCS = ASMGetCS();
2241 uSelSS = ASMGetSS();
2242#endif
2243
2244 /*
2245 * Host TR segment register.
2246 */
2247 uSelTR = ASMGetTR();
2248
2249#if HC_ARCH_BITS == 64
2250 /*
2251 * Determine if the host segment registers are suitable for VT-x. Otherwise load zero so that VM-entry succeeds, and
2252 * restore the real values before we get preempted. See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
2253 */
2254 if (uSelDS & (X86_SEL_RPL | X86_SEL_LDT))
2255 {
2256 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_DS;
2257 pVCpu->hm.s.vmx.RestoreHost.uHostSelDS = uSelDS;
2258 uSelDS = 0;
2259 }
2260 if (uSelES & (X86_SEL_RPL | X86_SEL_LDT))
2261 {
2262 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_ES;
2263 pVCpu->hm.s.vmx.RestoreHost.uHostSelES = uSelES;
2264 uSelES = 0;
2265 }
2266 if (uSelFS & (X86_SEL_RPL | X86_SEL_LDT))
2267 {
2268 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_FS;
2269 pVCpu->hm.s.vmx.RestoreHost.uHostSelFS = uSelFS;
2270 uSelFS = 0;
2271 }
2272 if (uSelGS & (X86_SEL_RPL | X86_SEL_LDT))
2273 {
2274 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_GS;
2275 pVCpu->hm.s.vmx.RestoreHost.uHostSelGS = uSelGS;
2276 uSelGS = 0;
2277 }
2278#endif
2279
2280 /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
2281 Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
2282 Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));
2283 Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
2284 Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
2285 Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT));
2286 Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT));
2287 Assert(!(uSelTR & X86_SEL_RPL)); Assert(!(uSelTR & X86_SEL_LDT));
2288 Assert(uSelCS);
2289 Assert(uSelTR);
2290
2291 /* Assertion is right but we would not have updated u32ExitCtls yet. */
2292#if 0
2293 if (!(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE))
2294 Assert(uSelSS != 0);
2295#endif
2296
2297 /* Write these host selector fields into the host-state area in the VMCS. */
2298 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS); AssertRCReturn(rc, rc);
2299 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS); AssertRCReturn(rc, rc);
2300#if HC_ARCH_BITS == 64
2301 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS); AssertRCReturn(rc, rc);
2302 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES); AssertRCReturn(rc, rc);
2303 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_FS, uSelFS); AssertRCReturn(rc, rc);
2304 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_GS, uSelGS); AssertRCReturn(rc, rc);
2305#endif
2306 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_TR, uSelTR); AssertRCReturn(rc, rc);
2307
2308 /*
2309 * Host GDTR and IDTR.
2310 */
2311 RTGDTR Gdtr;
2312 RT_ZERO(Gdtr);
2313#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2314 if (HMVMX_IS_64BIT_HOST_MODE())
2315 {
2316 X86XDTR64 Gdtr64;
2317 X86XDTR64 Idtr64;
2318 HMR0Get64bitGdtrAndIdtr(&Gdtr64, &Idtr64);
2319 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GDTR_BASE, Gdtr64.uAddr); AssertRCReturn(rc, rc);
2320 rc = VMXWriteVmcs64(VMX_VMCS_HOST_IDTR_BASE, Idtr64.uAddr); AssertRCReturn(rc, rc);
2321
2322 Gdtr.cbGdt = Gdtr64.cb;
2323 Gdtr.pGdt = (uintptr_t)Gdtr64.uAddr;
2324 }
2325 else
2326#endif
2327 {
2328 RTIDTR Idtr;
2329 ASMGetGDTR(&Gdtr);
2330 ASMGetIDTR(&Idtr);
2331 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt); AssertRCReturn(rc, rc);
2332 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt); AssertRCReturn(rc, rc);
2333
2334#if HC_ARCH_BITS == 64
2335 /*
2336 * Determine if we need to manually restore the GDTR and IDTR limits as VT-x zaps them to the
2337 * maximum limit (0xffff) on every VM-exit.
2338 */
2339 if (Gdtr.cbGdt != 0xffff)
2340 {
2341 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;
2342 AssertCompile(sizeof(Gdtr) == sizeof(X86XDTR64));
2343 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
2344 }
2345
2346 /*
2347 * The useful IDT limit is effectively 0xfff (256 16-byte gates in 64-bit mode). Therefore, if the host's limit is already 0xfff, VT-x bloating the limit to 0xffff
2348 * is not a problem as descriptors beyond that cannot be reached anyway. See Intel spec. 6.14.1 "64-Bit Mode IDT" and
2349 * Intel spec. 6.2 "Exception and Interrupt Vectors".
2350 */
2351 if (Idtr.cbIdt < 0x0fff)
2352 {
2353 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
2354 AssertCompile(sizeof(Idtr) == sizeof(X86XDTR64));
2355 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostIdtr, &Idtr, sizeof(X86XDTR64));
2356 }
2357#endif
2358 }
2359
2360 /*
2361 * Host TR base. Verify that the TR selector doesn't point past the GDT. Masking off the TI and RPL bits yields
2362 * the byte offset into the GDT (selector index * 8), which is effectively what the CPU does. TI is always 0 and RPL should be too in most cases.
2363 */
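    /* Example (illustrative, not from the original source): TR = 0x0040 (index 8, TI = 0, RPL = 0) masks to byte offset 0x40 into the GDT. */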
2364 if ((uSelTR & X86_SEL_MASK) > Gdtr.cbGdt)
2365 {
2366 AssertMsgFailed(("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt));
2367 return VERR_VMX_INVALID_HOST_STATE;
2368 }
2369
2370 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
2371#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2372 if (HMVMX_IS_64BIT_HOST_MODE())
2373 {
2374 /* We need the 64-bit TR base for hybrid darwin. */
2375 uint64_t u64TRBase = X86DESC64_BASE((PX86DESC64)pDesc);
2376 rc = VMXWriteVmcs64(VMX_VMCS_HOST_TR_BASE, u64TRBase);
2377 }
2378 else
2379#endif
2380 {
2381 uintptr_t uTRBase;
2382#if HC_ARCH_BITS == 64
2383 uTRBase = X86DESC64_BASE(pDesc);
2384
2385 /*
2386 * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on all VM-exits.
2387 * The type is the same for 64-bit busy TSS[1]. The limit needs manual restoration if the host has something else.
2388 * Task switching is not supported in 64-bit mode[2], but the limit still matters as IOPM is supported in 64-bit mode.
2389 * Restoring the limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0.
2390 *
2391 * [1] See Intel spec. 3.5 "System Descriptor Types".
2392 * [2] See Intel spec. 7.2.3 "TSS Descriptor in 64-bit mode".
2393 */
2394 Assert(pDesc->System.u4Type == 11);
2395 if ( pDesc->System.u16LimitLow != 0x67
2396 || pDesc->System.u4LimitHigh)
2397 {
2398 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_TR;
2399 pVCpu->hm.s.vmx.RestoreHost.uHostSelTR = uSelTR;
2400
2401 /* Store the GDTR here as we need it while restoring TR. */
2402 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
2403 }
2404#else
2405 uTRBase = X86DESC_BASE(pDesc);
2406#endif
2407 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
2408 }
2409 AssertRCReturn(rc, rc);
2410
2411 /*
2412 * Host FS base and GS base.
2413 */
2414#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2415 if (HMVMX_IS_64BIT_HOST_MODE())
2416 {
2417 uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
2418 uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
2419 rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase); AssertRCReturn(rc, rc);
2420 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase); AssertRCReturn(rc, rc);
2421
2422# if HC_ARCH_BITS == 64
2423 /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */
2424 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS)
2425 pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = u64FSBase;
2426 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_GS)
2427 pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase;
2428# endif
2429 }
2430#endif
2431 return rc;
2432}
2433
2434
2435/**
2436 * Saves certain host MSRs in the VM-exit MSR-load area and some in the
2437 * host-state area of the VMCS. These MSRs will be automatically restored on
2438 * the host after every successful VM-exit.
2439 *
2440 * @returns VBox status code.
2441 * @param pVM Pointer to the VM.
2442 * @param pVCpu Pointer to the VMCPU.
2443 */
2444DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu)
2445{
2446 AssertPtr(pVCpu);
2447 AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
2448
2449 int rc = VINF_SUCCESS;
2450#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
2451 PVMXMSR pHostMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvHostMsr;
2452 uint32_t cHostMsrs = 0;
2453 uint32_t u32HostExtFeatures = pVM->hm.s.cpuid.u32AMDFeatureEDX;
2454
2455 if (u32HostExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
2456 {
2457 uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
2458
2459# if HC_ARCH_BITS == 64
2460 /* Paranoia. 64-bit code requires these bits to be set always. */
2461 Assert((u64HostEfer & (MSR_K6_EFER_LMA | MSR_K6_EFER_LME)) == (MSR_K6_EFER_LMA | MSR_K6_EFER_LME));
2462
2463 /*
2464 * We currently do not save/restore host EFER, we just make sure it doesn't get modified by VT-x operation.
2465 * All guest accesses (read, write) on EFER cause VM-exits. If we are to conditionally load the guest EFER for
2466 * some reason (e.g. allow transparent reads) we would activate the code below.
2467 */
2468# if 0
2469 /* All our supported 64-bit host platforms must have NXE bit set. Otherwise we can change the below code to save EFER. */
2470 Assert(u64HostEfer & (MSR_K6_EFER_NXE));
2471 /* The SCE bit is only applicable in 64-bit mode. Save EFER if it doesn't match what the guest has.
2472 See Intel spec. 30.10.4.3 "Handling the SYSCALL and SYSRET Instructions". */
2473 if (CPUMIsGuestInLongMode(pVCpu))
2474 {
2475 uint64_t u64GuestEfer;
2476 rc = CPUMQueryGuestMsr(pVCpu, MSR_K6_EFER, &u64GuestEfer);
2477 AssertRC(rc);
2478
2479 if ((u64HostEfer & MSR_K6_EFER_SCE) != (u64GuestEfer & MSR_K6_EFER_SCE))
2480 {
2481 pHostMsr->u32IndexMSR = MSR_K6_EFER;
2482 pHostMsr->u32Reserved = 0;
2483 pHostMsr->u64Value = u64HostEfer;
2484 pHostMsr++; cHostMsrs++;
2485 }
2486 }
2487# endif
2488# else /* HC_ARCH_BITS != 64 */
2489 pHostMsr->u32IndexMSR = MSR_K6_EFER;
2490 pHostMsr->u32Reserved = 0;
2491# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2492 if (CPUMIsGuestInLongMode(pVCpu))
2493 {
2494 /* Must match the EFER value in our 64-bit switcher. */
2495 pHostMsr->u64Value = u64HostEfer | MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_NXE;
2496 }
2497 else
2498# endif
2499 pHostMsr->u64Value = u64HostEfer;
2500 pHostMsr++; cHostMsrs++;
2501# endif /* HC_ARCH_BITS == 64 */
2502 }
2503
2504# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2505 if (HMVMX_IS_64BIT_HOST_MODE())
2506 {
2507 pHostMsr->u32IndexMSR = MSR_K6_STAR;
2508 pHostMsr->u32Reserved = 0;
2509 pHostMsr->u64Value = ASMRdMsr(MSR_K6_STAR); /* legacy syscall eip, cs & ss */
2510 pHostMsr++; cHostMsrs++;
2511 pHostMsr->u32IndexMSR = MSR_K8_LSTAR;
2512 pHostMsr->u32Reserved = 0;
2513 pHostMsr->u64Value = ASMRdMsr(MSR_K8_LSTAR); /* 64-bit mode syscall rip */
2514 pHostMsr++; cHostMsrs++;
2515 pHostMsr->u32IndexMSR = MSR_K8_SF_MASK;
2516 pHostMsr->u32Reserved = 0;
2517 pHostMsr->u64Value = ASMRdMsr(MSR_K8_SF_MASK); /* syscall flag mask */
2518 pHostMsr++; cHostMsrs++;
2519 pHostMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
2520 pHostMsr->u32Reserved = 0;
2521 pHostMsr->u64Value = ASMRdMsr(MSR_K8_KERNEL_GS_BASE); /* swapgs exchange value */
2522 pHostMsr++; cHostMsrs++;
2523 }
2524# endif
2525
2526 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
2527 if (RT_UNLIKELY(cHostMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)))
2528 {
2529 LogRel(("cHostMsrs=%u Cpu=%u\n", cHostMsrs, (unsigned)MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)));
2530 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2531 }
2532
2533 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs);
2534#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
2535
2536 /*
2537 * Host Sysenter MSRs.
2538 */
2539 rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
2540 AssertRCReturn(rc, rc);
2541#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2542 if (HMVMX_IS_64BIT_HOST_MODE())
2543 {
2544 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
2545 AssertRCReturn(rc, rc);
2546 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
2547 }
2548 else
2549 {
2550 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
2551 AssertRCReturn(rc, rc);
2552 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
2553 }
2554#elif HC_ARCH_BITS == 32
2555 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
2556 AssertRCReturn(rc, rc);
2557 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
2558#else
2559 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
2560 AssertRCReturn(rc, rc);
2561 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
2562#endif
2563 AssertRCReturn(rc, rc);
2564
2565 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT, IA32_EFER, also see
2566 * hmR0VmxSetupExitCtls() !! */
2567 return rc;
2568}
2569
2570
2571/**
2572 * Sets up VM-entry controls in the VMCS. These controls can affect things done
2573 * on VM-exit; e.g. "load debug controls", see Intel spec. 24.8.1 "VM-entry
2574 * controls".
2575 *
2576 * @returns VBox status code.
2577 * @param pVCpu Pointer to the VMCPU.
2578 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2579 * out-of-sync. Make sure to update the required fields
2580 * before using them.
2581 *
2582 * @remarks No-long-jump zone!!!
2583 */
2584DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2585{
2586 int rc = VINF_SUCCESS;
2587 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_ENTRY_CTLS)
2588 {
2589 PVM pVM = pVCpu->CTX_SUFF(pVM);
2590 uint32_t val = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0; /* Bits set here must be set in the VMCS. */
2591 uint32_t zap = pVM->hm.s.vmx.msr.vmx_entry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2592
2593 /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supported the 1-setting of this bit. */
2594 val |= VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG;
2595
2596 /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
2597 if (CPUMIsGuestInLongModeEx(pMixedCtx))
2598 val |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;
2599 else
2600 Assert(!(val & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST));
2601
2602 /*
2603 * The following should not be set (since we're not in SMM mode):
2604 * - VMX_VMCS_CTRL_ENTRY_ENTRY_SMM
2605 * - VMX_VMCS_CTRL_ENTRY_DEACTIVATE_DUALMON
2606 */
2607
2608 /** @todo VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR,
2609 * VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR,
2610 * VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR */
2611
2612 if ((val & zap) != val)
2613 {
2614 LogRel(("hmR0VmxLoadGuestEntryCtls: invalid VM-entry controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
2615 pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0, val, zap));
2616 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2617 }
2618
2619 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, val);
2620 AssertRCReturn(rc, rc);
2621
2622 /* Update VCPU with the currently set VM-entry controls. */
2623 pVCpu->hm.s.vmx.u32EntryCtls = val;
2624 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_ENTRY_CTLS;
2625 }
2626 return rc;
2627}
2628
2629
2630/**
2631 * Sets up the VM-exit controls in the VMCS.
2632 *
2633 * @returns VBox status code.
2635 * @param pVCpu Pointer to the VMCPU.
2636 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2637 * out-of-sync. Make sure to update the required fields
2638 * before using them.
2639 *
2640 * @remarks requires EFER.
2641 */
2642DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2643{
2644 int rc = VINF_SUCCESS;
2645 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_EXIT_CTLS)
2646 {
2647 PVM pVM = pVCpu->CTX_SUFF(pVM);
2648 uint32_t val = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0; /* Bits set here must be set in the VMCS. */
2649 uint32_t zap = pVM->hm.s.vmx.msr.vmx_exit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2650
2651 /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
2652 val |= VMX_VMCS_CTRL_EXIT_SAVE_DEBUG;
2653
2654 /*
2655 * Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary.
2656 * On VM-exit, VT-x sets both the host EFER.LMA and EFER.LME bits to this value. See the assertion in hmR0VmxSaveHostMsrs().
2657 */
2658#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2659 if (HMVMX_IS_64BIT_HOST_MODE())
2660 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
2661 else
2662 Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
2663#elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
2664 if (CPUMIsGuestInLongModeEx(pMixedCtx))
2665 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE; /* The switcher goes to long mode. */
2666 else
2667 Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
2668#endif
2669
2670 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
2671 Assert(!(val & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT));
2672
2673 /** @todo VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR,
2674 * VMX_VMCS_CTRL_EXIT_SAVE_GUEST_PAT_MSR,
2675 * VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR,
2676 * VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR,
2677 * VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR. */
2678
2679 if (pVM->hm.s.vmx.msr.vmx_exit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER)
2680 val |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER;
2681
2682 if ((val & zap) != val)
2683 {
2684 LogRel(("hmR0VmxLoadGuestExitCtls: invalid VM-exit controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
2685 pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0, val, zap));
2686 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2687 }
2688
2689 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, val);
2690 AssertRCReturn(rc, rc);
2691
2692 /* Update VCPU with the currently set VM-exit controls. */
2693 pVCpu->hm.s.vmx.u32ExitCtls = val;
2694 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_EXIT_CTLS;
2695 }
2696 return rc;
2697}
2698
2699
2700/**
2701 * Loads the guest APIC and related state.
2702 *
2703 * @returns VBox status code.
2705 * @param pVCpu Pointer to the VMCPU.
2706 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2707 * out-of-sync. Make sure to update the required fields
2708 * before using them.
2709 */
2710DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2711{
2712 int rc = VINF_SUCCESS;
2713 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_APIC_STATE)
2714 {
2715 /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
2716 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
2717 {
2718 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
2719
2720 bool fPendingIntr = false;
2721 uint8_t u8Tpr = 0;
2722 uint8_t u8PendingIntr = 0;
2723 rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
2724 AssertRCReturn(rc, rc);
2725
2726 /*
2727 * If there are external interrupts pending but masked by the TPR value, instruct VT-x to cause a VM-exit when
2728 * the guest lowers its TPR below the highest-priority pending interrupt and we can deliver the interrupt.
2729 * If there are no external interrupts pending, set threshold to 0 to not cause a VM-exit. We will eventually deliver
2730 * the interrupt when we VM-exit for other reasons.
2731 */
2732 pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8Tpr; /* Offset 0x80 is TPR in the APIC MMIO range. */
2733 uint32_t u32TprThreshold = 0;
2734 if (fPendingIntr)
2735 {
2736 /* Bits 3-0 of the TPR threshold field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
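            /* Worked example (illustrative, not from the original source): u8Tpr=0x60 and u8PendingIntr=0x41 give
               priority classes 6 and 4; the pending interrupt is masked, so the threshold becomes 4 and a guest
               write that lowers the TPR class below 4 causes a VM-exit, at which point the interrupt can be delivered. */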
2737 const uint8_t u8PendingPriority = (u8PendingIntr >> 4);
2738 const uint8_t u8TprPriority = (u8Tpr >> 4) & 7;
2739 if (u8PendingPriority <= u8TprPriority)
2740 u32TprThreshold = u8PendingPriority;
2741 else
2742 u32TprThreshold = u8TprPriority; /* Required for Vista 64-bit guest, see @bugref{6398}. */
2743 }
2744 Assert(!(u32TprThreshold & 0xfffffff0)); /* Bits 31:4 MBZ. */
2745
2746 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
2747 AssertRCReturn(rc, rc);
2748 }
2749
2750 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_APIC_STATE;
2751 }
2752 return rc;
2753}
2754
2755
2756/**
2757 * Gets the guest's interruptibility-state ("interrupt shadow" as AMD calls it).
2758 *
2759 * @returns Guest's interruptibility-state.
2760 * @param pVCpu Pointer to the VMCPU.
2761 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2762 * out-of-sync. Make sure to update the required fields
2763 * before using them.
2764 *
2765 * @remarks No-long-jump zone!!!
2766 * @remarks Has side-effects with VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
2767 */
2768DECLINLINE(uint32_t) hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2769{
2770 /*
2771 * Instructions like STI and MOV SS inhibit interrupts till the next instruction completes. Check if we should
2772 * inhibit interrupts or clear any existing interrupt-inhibition.
2773 */
2774 uint32_t uIntrState = 0;
2775 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2776 {
2777 /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). */
2778 AssertMsg((pVCpu->hm.s.vmx.fUpdatedGuestState & (HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS))
2779 == (HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS), ("%#x\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
2780 if (pMixedCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
2781 {
2782 /*
2783 * We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in
2784 * VT-x, the flag's condition to be cleared is met and thus the cleared state is correct.
2785 */
2786 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2787 }
2788 else if (pMixedCtx->eflags.Bits.u1IF)
2789 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
2790 else
2791 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
2792 }
2793 return uIntrState;
2794}
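
/*
 * Illustrative usage sketch (not part of the original source): the value returned above is expected to be
 * written into the VMCS via hmR0VmxLoadGuestIntrState() below, presumably from the guest-state loading path.
 */
#if 0
    uint32_t const uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
    int rc = hmR0VmxLoadGuestIntrState(pVCpu, uIntrState);
    AssertRC(rc);
#endif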
2795
2796
2797/**
2798 * Loads the guest's interruptibility-state into the guest-state area in the
2799 * VMCS.
2800 *
2801 * @returns VBox status code.
2802 * @param pVCpu Pointer to the VMCPU.
2803 * @param uIntrState The interruptibility-state to set.
2804 */
2805static int hmR0VmxLoadGuestIntrState(PVMCPU pVCpu, uint32_t uIntrState)
2806{
2807 AssertMsg(!(uIntrState & 0xfffffff0), ("%#x\n", uIntrState)); /* Bits 31:4 MBZ. */
2808 Assert((uIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */
2809 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);
2810 AssertRCReturn(rc, rc);
2811 return rc;
2812}
2813
2814
2815/**
2816 * Loads the guest's RIP into the guest-state area in the VMCS.
2817 *
2818 * @returns VBox status code.
2819 * @param pVCpu Pointer to the VMCPU.
2820 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2821 * out-of-sync. Make sure to update the required fields
2822 * before using them.
2823 *
2824 * @remarks No-long-jump zone!!!
2825 */
2826static int hmR0VmxLoadGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2827{
2828 int rc = VINF_SUCCESS;
2829 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RIP)
2830 {
2831 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
2832 AssertRCReturn(rc, rc);
2833 Log4(("Load: VMX_VMCS_GUEST_RIP=%#RX64\n", pMixedCtx->rip));
2834 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RIP;
2835 }
2836 return rc;
2837}
2838
2839
2840/**
2841 * Loads the guest's RSP into the guest-state area in the VMCS.
2842 *
2843 * @returns VBox status code.
2844 * @param pVCpu Pointer to the VMCPU.
2845 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2846 * out-of-sync. Make sure to update the required fields
2847 * before using them.
2848 *
2849 * @remarks No-long-jump zone!!!
2850 */
2851static int hmR0VmxLoadGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2852{
2853 int rc = VINF_SUCCESS;
2854 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RSP)
2855 {
2856 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
2857 AssertRCReturn(rc, rc);
2858 Log4(("Load: VMX_VMCS_GUEST_RSP=%#RX64\n", pMixedCtx->rsp));
2859 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RSP;
2860 }
2861 return rc;
2862}
2863
2864
2865/**
2866 * Loads the guest's RFLAGS into the guest-state area in the VMCS.
2867 *
2868 * @returns VBox status code.
2869 * @param pVCpu Pointer to the VMCPU.
2870 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2871 * out-of-sync. Make sure to update the required fields
2872 * before using them.
2873 *
2874 * @remarks No-long-jump zone!!!
2875 */
2876static int hmR0VmxLoadGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2877{
2878 int rc = VINF_SUCCESS;
2879 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RFLAGS)
2880 {
2881 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
2882 Let us assert it as such and use 32-bit VMWRITE. */
2883 Assert(!(pMixedCtx->rflags.u64 >> 32));
2884 X86EFLAGS uEFlags = pMixedCtx->eflags;
2885 uEFlags.u32 &= VMX_EFLAGS_RESERVED_0; /* Bits 22-31, 15, 5 & 3 MBZ. */
2886 uEFlags.u32 |= VMX_EFLAGS_RESERVED_1; /* Bit 1 MB1. */
2887
2888 /*
2889 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so we can restore them on VM exit.
2890 * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under Virtual 8086 mode.
2891 */
2892 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
2893 {
2894 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2895 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2896 pVCpu->hm.s.vmx.RealMode.eflags.u32 = uEFlags.u32; /* Save the original eflags of the real-mode guest. */
2897 uEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
2898 uEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
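            /* Worked example (illustrative): real-mode eflags 0x00003202 (IF set, IOPL=3) become 0x00020202 here --
               the VM bit (bit 17) is set and IOPL is cleared -- while the original 0x00003202 is kept in
               RealMode.eflags for restoration on VM-exit. */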
2899 }
2900
2901 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, uEFlags.u32);
2902 AssertRCReturn(rc, rc);
2903
2904 Log4(("Load: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", uEFlags.u32));
2905 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RFLAGS;
2906 }
2907 return rc;
2908}
2909
2910
2911/**
2912 * Loads the guest RIP, RSP and RFLAGS into the guest-state area in the VMCS.
2913 *
2914 * @returns VBox status code.
2915 * @param pVCpu Pointer to the VMCPU.
2916 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2917 * out-of-sync. Make sure to update the required fields
2918 * before using them.
2919 *
2920 * @remarks No-long-jump zone!!!
2921 */
2922DECLINLINE(int) hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2923{
2924 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
2925 AssertRCReturn(rc, rc);
2926 rc = hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
2927 AssertRCReturn(rc, rc);
2928 rc = hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
2929 AssertRCReturn(rc, rc);
2930 return rc;
2931}
2932
2933
2934/**
2935 * Loads the guest control registers (CR0, CR3, CR4) into the guest-state area
2936 * in the VMCS.
2937 *
2938 * @returns VBox status code.
2939 * @param pVCpu Pointer to the VMCPU.
2940 * @param pCtx Pointer to the guest-CPU context. The data may be
2942 * out-of-sync. Make sure to update the required fields
2943 * before using them.
2944 *
2945 * @remarks No-long-jump zone!!!
2946 */
2947static int hmR0VmxLoadGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
2948{
2949 int rc = VINF_SUCCESS;
2950 PVM pVM = pVCpu->CTX_SUFF(pVM);
2951
2952 /*
2953 * Guest CR0.
2954 * Guest FPU.
2955 */
2956 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
2957 {
2958 Assert(!(pCtx->cr0 >> 32));
2959 uint32_t u32GuestCR0 = pCtx->cr0;
2960
2961 /* The guest's view (read access) of its CR0 is unblemished. */
2962 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32GuestCR0);
2963 AssertRCReturn(rc, rc);
2964 Log4(("Load: VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX32\n", u32GuestCR0));
2965
2966 /* Setup VT-x's view of the guest CR0. */
2967 /* Minimize VM-exits due to CR3 changes when we have NestedPaging. */
2968 if (pVM->hm.s.fNestedPaging)
2969 {
2970 if (CPUMIsGuestPagingEnabledEx(pCtx))
2971 {
2972 /* The guest has paging enabled, let it access CR3 without causing a VM exit if supported. */
2973 pVCpu->hm.s.vmx.u32ProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
2974 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT);
2975 }
2976 else
2977 {
2978 /* The guest doesn't have paging enabled, make CR3 access to cause VM exits to update our shadow. */
2979 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
2980 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
2981 }
2982
2983 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2984 if (pVM->hm.s.vmx.fUnrestrictedGuest)
2985 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
2986
2987 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
2988 AssertRCReturn(rc, rc);
2989 }
2990 else
2991 u32GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2992
2993 /*
2994 * Guest FPU bits.
2995 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be set on the first
2996 * CPUs to support VT-x; it makes no mention of relaxing this for unrestricted execution (UX) in the VM-entry checks.
2997 */
2998 u32GuestCR0 |= X86_CR0_NE;
2999 bool fInterceptNM = false;
3000 if (CPUMIsGuestFPUStateActive(pVCpu))
3001 {
3002 fInterceptNM = false; /* Guest FPU active, no need to VM-exit on #NM. */
3003 /* The guest should still get #NM exceptions when it expects it to, so we should not clear TS & MP bits here.
3004 We're only concerned about -us- not intercepting #NMs when the guest-FPU is active. Not the guest itself! */
3005 }
3006 else
3007 {
3008 fInterceptNM = true; /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
3009 u32GuestCR0 |= X86_CR0_TS /* Guest can task switch quickly and do lazy FPU syncing. */
3010 | X86_CR0_MP; /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
3011 }
3012
3013 /* Catch floating point exceptions if we need to report them to the guest in a different way. */
3014 bool fInterceptMF = false;
3015 if (!(pCtx->cr0 & X86_CR0_NE))
3016 fInterceptMF = true;
3017
3018 /* Finally, intercept all exceptions as we cannot directly inject them in real-mode, see hmR0VmxInjectEventVmcs(). */
3019 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3020 {
3021 Assert(PDMVmmDevHeapIsEnabled(pVM));
3022 Assert(pVM->hm.s.vmx.pRealModeTSS);
3023 pVCpu->hm.s.vmx.u32XcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
3024 fInterceptNM = true;
3025 fInterceptMF = true;
3026 }
3027 else
3028 pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
3029
3030 if (fInterceptNM)
3031 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_NM);
3032 else
3033 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_NM);
3034
3035 if (fInterceptMF)
3036 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_MF);
3037 else
3038 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_MF);
3039
3040 /* Additional intercepts for debugging; define these yourself explicitly. */
3041#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3042 pVCpu->hm.s.vmx.u32XcptBitmap |= 0
3043 | RT_BIT(X86_XCPT_BP)
3044 | RT_BIT(X86_XCPT_DB)
3045 | RT_BIT(X86_XCPT_DE)
3046 | RT_BIT(X86_XCPT_NM)
3047 | RT_BIT(X86_XCPT_UD)
3048 | RT_BIT(X86_XCPT_NP)
3049 | RT_BIT(X86_XCPT_SS)
3050 | RT_BIT(X86_XCPT_GP)
3051 | RT_BIT(X86_XCPT_PF)
3052 | RT_BIT(X86_XCPT_MF)
3053 ;
3054#elif defined(HMVMX_ALWAYS_TRAP_PF)
3055 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
3056#endif
3057
3058 Assert(pVM->hm.s.fNestedPaging || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_PF)));
3059
3060 /* Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). */
3061 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 & pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
3062 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 | pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
3063 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
3064 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
3065 else
3066 Assert((uSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
3067
3068 u32GuestCR0 |= uSetCR0;
3069 u32GuestCR0 &= uZapCR0;
3070 u32GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */
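     /* Illustration with typical (not architecturally guaranteed) MSR values: IA32_VMX_CR0_FIXED0 = 0x80000021 and
        IA32_VMX_CR0_FIXED1 = 0xffffffff give uSetCR0 = 0x80000021 (PE, NE, PG forced to 1) and uZapCR0 = 0xffffffff
        (nothing forced to 0); bits set in FIXED0 must be 1 and bits clear in FIXED1 must be 0 in the CR0 we load. */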
3071
3072 /* Write VT-x's view of the guest CR0 into the VMCS and update the exception bitmap. */
3073 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCR0);
3074 AssertRCReturn(rc, rc);
3075 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
3076 AssertRCReturn(rc, rc);
3077 Log4(("Load: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", u32GuestCR0, uSetCR0, uZapCR0));
3078
3079 /*
3080 * CR0 is shared between host and guest along with a CR0 read shadow. Therefore, certain bits must not be changed
3081 * by the guest because VT-x ignores saving/restoring them (namely CD, ET, NW) and for certain other bits
3082 * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables).
3083 */
3084 uint32_t u32CR0Mask = 0;
3085 u32CR0Mask = X86_CR0_PE
3086 | X86_CR0_NE
3087 | X86_CR0_WP
3088 | X86_CR0_PG
3089 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
3090 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
3091 | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
3092 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3093 u32CR0Mask &= ~X86_CR0_PE;
3094 if (pVM->hm.s.fNestedPaging)
3095 u32CR0Mask &= ~X86_CR0_WP;
3096
3097 /* If the guest FPU state is active, don't need to VM-exit on writes to FPU related bits in CR0. */
3098 if (fInterceptNM)
3099 u32CR0Mask |= (X86_CR0_TS | X86_CR0_MP);
3100 else
3101 u32CR0Mask &= ~(X86_CR0_TS | X86_CR0_MP);
3102
3103 /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */
3104 pVCpu->hm.s.vmx.u32CR0Mask = u32CR0Mask;
3105 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32CR0Mask);
3106 AssertRCReturn(rc, rc);
3107
3108 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
3109 }
3110
3111 /*
3112 * Guest CR2.
3113 * It's always loaded in the assembler code. Nothing to do here.
3114 */
3115
3116 /*
3117 * Guest CR3.
3118 */
3119 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3)
3120 {
3121 RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
3122 if (pVM->hm.s.fNestedPaging)
3123 {
3124 pVCpu->hm.s.vmx.HCPhysEPTP = PGMGetHyperCR3(pVCpu);
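     /* With nested paging the hyper "CR3" is the host-physical address of the root EPT table (the EPT PML4);
        the memory type and page-walk length are OR'ed into the low bits below to form the final EPTP. */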
3125
3126 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
3127 Assert(pVCpu->hm.s.vmx.HCPhysEPTP);
3128 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & UINT64_C(0xfff0000000000000)));
3129 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & 0xfff));
3130
3131 /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
3132 pVCpu->hm.s.vmx.HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
3133 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
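     /* EPTP layout per Intel spec. 24.6.11: bits 2:0 = EPT memory type (6 = write-back), bits 5:3 = page-walk
        length minus 1 (3 for a 4-level walk), bit 6 = accessed/dirty flag enable (not used here), bits 11:7
        reserved, bits 12 and up = physical address of the EPT PML4 table. */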
3134
3135 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
3136 AssertMsg( ((pVCpu->hm.s.vmx.HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
3137 && ((pVCpu->hm.s.vmx.HCPhysEPTP >> 6) & 0x3f) == 0, /* Bits 6:11 MBZ. */
3138 ("EPTP %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3139
3140 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
3141 AssertRCReturn(rc, rc);
3142 Log4(("Load: VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3143
3144 if ( pVM->hm.s.vmx.fUnrestrictedGuest
3145 || CPUMIsGuestPagingEnabledEx(pCtx))
3146 {
3147 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
3148 if (CPUMIsGuestInPAEModeEx(pCtx))
3149 {
3150 rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]); AssertRCReturn(rc, rc);
3151 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
3152 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
3153 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
3154 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
3155 }
3156
3157 /* The guest's view of its CR3 is unblemished with Nested Paging when the guest is using paging or we
3158 have Unrestricted Execution to handle the guest when it's not using paging. */
3159 GCPhysGuestCR3 = pCtx->cr3;
3160 }
3161 else
3162 {
3163 /*
3164 * The guest is not using paging, but the CPU (VT-x) has to. While the guest thinks it accesses physical memory
3165 * directly, we use our identity-mapped page table to map guest-linear to guest-physical addresses.
3166 * EPT takes care of translating it to host-physical addresses.
3167 */
3168 RTGCPHYS GCPhys;
3169 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
3170 Assert(PDMVmmDevHeapIsEnabled(pVM));
3171
3172 /* We obtain it here every time as the guest could have relocated this PCI region. */
3173 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
3174 AssertRCReturn(rc, rc);
3175
3176 GCPhysGuestCR3 = GCPhys;
3177 }
3178
3179 Log4(("Load: VMX_VMCS_GUEST_CR3=%#RGv (GstN)\n", GCPhysGuestCR3));
3180 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
3181 }
3182 else
3183 {
3184 /* Non-nested paging case, just use the hypervisor's CR3. */
3185 RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
3186
3187 Log4(("Load: VMX_VMCS_GUEST_CR3=%#RHv (HstN)\n", HCPhysGuestCR3));
3188 rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3);
3189 }
3190 AssertRCReturn(rc, rc);
3191
3192 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR3;
3193 }
3194
3195 /*
3196 * Guest CR4.
3197 */
3198 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4)
3199 {
3200 Assert(!(pCtx->cr4 >> 32));
3201 uint32_t u32GuestCR4 = pCtx->cr4;
3202
3203 /* The guest's view of its CR4 is unblemished. */
3204 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32GuestCR4);
3205 AssertRCReturn(rc, rc);
3206 Log4(("Load: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RX32\n", u32GuestCR4));
3207
3208 /* Setup VT-x's view of the guest CR4. */
3209 /*
3210 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software interrupts to the 8086 program
3211 * interrupt handler. Clear the VME bit (the interrupt redirection bitmap is already all 0, see hmR3InitFinalizeR0())
3212 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
3213 */
3214 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3215 {
3216 Assert(pVM->hm.s.vmx.pRealModeTSS);
3217 Assert(PDMVmmDevHeapIsEnabled(pVM));
3218 u32GuestCR4 &= ~X86_CR4_VME;
3219 }
3220
3221 if (pVM->hm.s.fNestedPaging)
3222 {
3223 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
3224 && !pVM->hm.s.vmx.fUnrestrictedGuest)
3225 {
3226 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
3227 u32GuestCR4 |= X86_CR4_PSE;
3228 /* Our identity mapping is a 32-bit page directory. */
3229 u32GuestCR4 &= ~X86_CR4_PAE;
3230 }
3231 /* else use guest CR4.*/
3232 }
3233 else
3234 {
3235 /*
3236 * The shadow paging mode and the guest paging mode are different; the shadow follows the host
3237 * paging mode, so we need to adjust VT-x's view of CR4 depending on our shadow page tables.
3238 */
3239 switch (pVCpu->hm.s.enmShadowMode)
3240 {
3241 case PGMMODE_REAL: /* Real-mode. */
3242 case PGMMODE_PROTECTED: /* Protected mode without paging. */
3243 case PGMMODE_32_BIT: /* 32-bit paging. */
3244 {
3245 u32GuestCR4 &= ~X86_CR4_PAE;
3246 break;
3247 }
3248
3249 case PGMMODE_PAE: /* PAE paging. */
3250 case PGMMODE_PAE_NX: /* PAE paging with NX. */
3251 {
3252 u32GuestCR4 |= X86_CR4_PAE;
3253 break;
3254 }
3255
3256 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
3257 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
3258#ifdef VBOX_ENABLE_64_BITS_GUESTS
3259 break;
3260#endif
3261 default:
3262 AssertFailed();
3263 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
3264 }
3265 }
3266
3267 /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
3268 uint64_t uSetCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 & pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
3269 uint64_t uZapCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 | pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
3270 u32GuestCR4 |= uSetCR4;
3271 u32GuestCR4 &= uZapCR4;
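     /* Same fixed-bit scheme as for CR0 above. In practice IA32_VMX_CR4_FIXED0 typically only mandates CR4.VMXE
        (0x2000), so the CR4 value VT-x sees has VMXE forced on while the read shadow written above still shows
        the guest's own value. */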
3272
3273 /* Write VT-x's view of the guest CR4 into the VMCS. */
3274 Log4(("Load: VMX_VMCS_GUEST_CR4=%#RX32 (Set=%#RX32 Zap=%#RX32)\n", u32GuestCR4, uSetCR4, uZapCR4));
3275 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCR4);
3276 AssertRCReturn(rc, rc);
3277
3278 /* Set up the CR4 mask. These CR4 bits are owned by the host; if the guest attempts to change them, a VM-exit occurs. */
3279 uint32_t u32CR4Mask = 0;
3280 u32CR4Mask = X86_CR4_VME
3281 | X86_CR4_PAE
3282 | X86_CR4_PGE
3283 | X86_CR4_PSE
3284 | X86_CR4_VMXE;
3285 pVCpu->hm.s.vmx.u32CR4Mask = u32CR4Mask;
3286 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32CR4Mask);
3287 AssertRCReturn(rc, rc);
3288
3289 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR4;
3290 }
3291 return rc;
3292}
3293
3294
3295/**
3296 * Loads the guest debug registers into the guest-state area in the VMCS.
3297 * This also sets up whether #DB and MOV DRx accesses cause VM exits.
3298 *
3299 * @returns VBox status code.
3300 * @param pVCpu Pointer to the VMCPU.
3301 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3302 * out-of-sync. Make sure to update the required fields
3303 * before using them.
3304 *
3305 * @remarks No-long-jump zone!!!
3306 */
3307static int hmR0VmxLoadGuestDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3308{
3309 if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
3310 return VINF_SUCCESS;
3311
3312#ifdef VBOX_STRICT
3313 /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Control Registers, Debug Registers, and MSRs" */
3314 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
3315 {
3316 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
3317 Assert((pMixedCtx->dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0); /* Bits 63:32, 15, 14, 12, 11 are reserved. */
3318 Assert((pMixedCtx->dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK); /* Bit 10 is reserved (RA1). */
3319 }
3320#endif
3321
3322 int rc;
3323 PVM pVM = pVCpu->CTX_SUFF(pVM);
3324 bool fInterceptDB = false;
3325 bool fInterceptMovDRx = false;
3326 if (pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu))
3327 {
3328 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
3329 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)
3330 {
3331 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
3332 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
3333 AssertRCReturn(rc, rc);
3334 Assert(fInterceptDB == false);
3335 }
3336 else
3337 {
3338 fInterceptDB = true;
3339 pMixedCtx->eflags.u32 |= X86_EFL_TF;
3340 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RFLAGS;
3341 }
3342 }
3343
3344 if (fInterceptDB || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
3345 {
3346 /*
3347 * Use the combined guest and host DRx values found in the hypervisor
3348 * register set because the debugger has breakpoints active or someone
3349 * is single stepping on the host side without a monitor trap flag.
3350 *
3351 * Note! DBGF expects a clean DR6 state before executing guest code.
3352 */
3353 if (!CPUMIsHyperDebugStateActive(pVCpu))
3354 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
3355 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
3356 Assert(CPUMIsHyperDebugStateActive(pVCpu));
3357
3358 /* Update DR7. (The other DRx values are handled by CPUM one way or the other.) */
3359 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)CPUMGetHyperDR7(pVCpu));
3360 AssertRCReturn(rc, rc);
3361
3362 fInterceptDB = true;
3363 fInterceptMovDRx = true;
3364 }
3365 else
3366 {
3367 /*
3368 * If the guest has enabled debug registers, we need to load them prior to
3369 * executing guest code so they'll trigger at the right time.
3370 */
3371 if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
3372 {
3373 if (!CPUMIsGuestDebugStateActive(pVCpu))
3374 {
3375 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
3376 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
3377 }
3378 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
3379 Assert(CPUMIsGuestDebugStateActive(pVCpu));
3380 }
3381 /*
3382 * If no debugging enabled, we'll lazy load DR0-3. Unlike on AMD-V, we
3383 * must intercept #DB in order to maintain a correct DR6 guest value.
3384 */
3385 else if (!CPUMIsGuestDebugStateActive(pVCpu))
3386 {
3387 fInterceptMovDRx = true;
3388 fInterceptDB = true;
3389 }
3390
3391 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);
3392 AssertRCReturn(rc, rc);
3393 }
3394
3395 /*
3396 * Update the exception bitmap regarding intercepting #DB generated by the guest.
3397 */
3398 if (fInterceptDB)
3399 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_DB);
3400 else if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3401 {
3402#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3403 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
3404#endif
3405 }
3406 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
3407 AssertRCReturn(rc, rc);
3408
3409 /*
3410 * Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions.
3411 */
3412 if (fInterceptMovDRx)
3413 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
3414 else
3415 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
3416 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
3417 AssertRCReturn(rc, rc);
3418
3419 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
3420 return VINF_SUCCESS;
3421}
3422
3423
3424#ifdef VBOX_STRICT
3425/**
3426 * Strict function to validate segment registers.
3427 *
3428 * @remarks Requires CR0.
3429 */
3430static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3431{
3432 /* Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3433 /* NOTE: The reason we check for attribute value 0 and not just the unusable bit here is because hmR0VmxWriteSegmentReg()
3434 * only updates the VMCS' copy of the value with the unusable bit and doesn't change the guest-context value. */
3435 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
3436 && ( !CPUMIsGuestInRealModeEx(pCtx)
3437 && !CPUMIsGuestInV86ModeEx(pCtx)))
3438 {
3439 /* Protected mode checks */
3440 /* CS */
3441 Assert(pCtx->cs.Attr.n.u1Present);
3442 Assert(!(pCtx->cs.Attr.u & 0xf00));
3443 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
3444 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
3445 || !(pCtx->cs.Attr.n.u1Granularity));
3446 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
3447 || (pCtx->cs.Attr.n.u1Granularity));
3448 /* CS cannot be loaded with NULL in protected mode. */
3449 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS?!? */
3450 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
3451 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
3452 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
3453 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
3454 else
3455 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
3456 /* SS */
3457 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
3458 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
3459 Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0));
3460 if ( !(pCtx->cr0 & X86_CR0_PE)
3461 || pCtx->cs.Attr.n.u4Type == 3)
3462 {
3463 Assert(!pCtx->ss.Attr.n.u2Dpl);
3464 }
3465 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
3466 {
3467 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
3468 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
3469 Assert(pCtx->ss.Attr.n.u1Present);
3470 Assert(!(pCtx->ss.Attr.u & 0xf00));
3471 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
3472 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
3473 || !(pCtx->ss.Attr.n.u1Granularity));
3474 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
3475 || (pCtx->ss.Attr.n.u1Granularity));
3476 }
3477 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
3478 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
3479 {
3480 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3481 Assert(pCtx->ds.Attr.n.u1Present);
3482 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
3483 Assert(!(pCtx->ds.Attr.u & 0xf00));
3484 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
3485 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
3486 || !(pCtx->ds.Attr.n.u1Granularity));
3487 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
3488 || (pCtx->ds.Attr.n.u1Granularity));
3489 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3490 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
3491 }
3492 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
3493 {
3494 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3495 Assert(pCtx->es.Attr.n.u1Present);
3496 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
3497 Assert(!(pCtx->es.Attr.u & 0xf00));
3498 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
3499 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
3500 || !(pCtx->es.Attr.n.u1Granularity));
3501 Assert( !(pCtx->es.u32Limit & 0xfff00000)
3502 || (pCtx->es.Attr.n.u1Granularity));
3503 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3504 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
3505 }
3506 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
3507 {
3508 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3509 Assert(pCtx->fs.Attr.n.u1Present);
3510 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
3511 Assert(!(pCtx->fs.Attr.u & 0xf00));
3512 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
3513 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
3514 || !(pCtx->fs.Attr.n.u1Granularity));
3515 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
3516 || (pCtx->fs.Attr.n.u1Granularity));
3517 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3518 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
3519 }
3520 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
3521 {
3522 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3523 Assert(pCtx->gs.Attr.n.u1Present);
3524 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
3525 Assert(!(pCtx->gs.Attr.u & 0xf00));
3526 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
3527 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
3528 || !(pCtx->gs.Attr.n.u1Granularity));
3529 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
3530 || (pCtx->gs.Attr.n.u1Granularity));
3531 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3532 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
3533 }
3534 /* 64-bit capable CPUs. */
3535# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3536 Assert(!(pCtx->cs.u64Base >> 32));
3537 Assert(!pCtx->ss.Attr.u || !(pCtx->ss.u64Base >> 32));
3538 Assert(!pCtx->ds.Attr.u || !(pCtx->ds.u64Base >> 32));
3539 Assert(!pCtx->es.Attr.u || !(pCtx->es.u64Base >> 32));
3540# endif
3541 }
3542 else if ( CPUMIsGuestInV86ModeEx(pCtx)
3543 || ( CPUMIsGuestInRealModeEx(pCtx)
3544 && !pVM->hm.s.vmx.fUnrestrictedGuest))
3545 {
3546 /* Real and v86 mode checks. */
3547 /* hmR0VmxWriteSegmentReg() writes the modified attributes into the VMCS. We want to check what we're actually feeding to VT-x. */
3548 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
3549 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3550 {
3551 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3; u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
3552 }
3553 else
3554 {
3555 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
3556 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
3557 }
3558
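     /* In real and v86 mode the base of each segment must equal its selector shifted left by 4;
        e.g. a selector of 0x1234 implies a base of 0x12340. The asserts below verify exactly that. */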
3559 /* CS */
3560 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
3561 Assert(pCtx->cs.u32Limit == 0xffff);
3562 Assert(u32CSAttr == 0xf3);
3563 /* SS */
3564 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
3565 Assert(pCtx->ss.u32Limit == 0xffff);
3566 Assert(u32SSAttr == 0xf3);
3567 /* DS */
3568 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
3569 Assert(pCtx->ds.u32Limit == 0xffff);
3570 Assert(u32DSAttr == 0xf3);
3571 /* ES */
3572 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
3573 Assert(pCtx->es.u32Limit == 0xffff);
3574 Assert(u32ESAttr == 0xf3);
3575 /* FS */
3576 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
3577 Assert(pCtx->fs.u32Limit == 0xffff);
3578 Assert(u32FSAttr == 0xf3);
3579 /* GS */
3580 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
3581 Assert(pCtx->gs.u32Limit == 0xffff);
3582 Assert(u32GSAttr == 0xf3);
3583 /* 64-bit capable CPUs. */
3584# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3585 Assert(!(pCtx->cs.u64Base >> 32));
3586 Assert(!u32SSAttr || !(pCtx->ss.u64Base >> 32));
3587 Assert(!u32DSAttr || !(pCtx->ds.u64Base >> 32));
3588 Assert(!u32ESAttr || !(pCtx->es.u64Base >> 32));
3589# endif
3590 }
3591}
3592#endif /* VBOX_STRICT */
3593
3594
3595/**
3596 * Writes a guest segment register into the guest-state area in the VMCS.
3597 *
3598 * @returns VBox status code.
3599 * @param pVCpu Pointer to the VMCPU.
3600 * @param idxSel Index of the selector in the VMCS.
3601 * @param idxLimit Index of the segment limit in the VMCS.
3602 * @param idxBase Index of the segment base in the VMCS.
3603 * @param idxAccess Index of the access rights of the segment in the VMCS.
3604 * @param pSelReg Pointer to the segment selector.
3605 * @param pCtx Pointer to the guest-CPU context.
3606 *
3607 * @remarks No-long-jump zone!!!
3608 */
3609static int hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
3610 uint32_t idxAccess, PCPUMSELREG pSelReg, PCPUMCTX pCtx)
3611{
3612 int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); /* 16-bit guest selector field. */
3613 AssertRCReturn(rc, rc);
3614 rc = VMXWriteVmcs32(idxLimit, pSelReg->u32Limit); /* 32-bit guest segment limit field. */
3615 AssertRCReturn(rc, rc);
3616 rc = VMXWriteVmcsGstN(idxBase, pSelReg->u64Base); /* Natural width guest segment base field.*/
3617 AssertRCReturn(rc, rc);
3618
3619 uint32_t u32Access = pSelReg->Attr.u;
3620 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3621 {
3622 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
3623 u32Access = 0xf3;
3624 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
3625 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
3626 }
3627 else
3628 {
3629 /*
3630 * The way to tell whether this is really a null selector or just a selector that was loaded with 0 in
3631 * real-mode is by the segment attributes. A selector loaded in real-mode with the value 0 is valid and usable in
3632 * protected-mode and we should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure that NULL selectors
3633 * loaded in protected-mode have their attributes set to 0.
3634 */
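     /* Note: X86DESCATTR_UNUSABLE corresponds to the VT-x "unusable" bit (bit 16 of the VMCS access-rights field). */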
3635 if (!u32Access)
3636 u32Access = X86DESCATTR_UNUSABLE;
3637 }
3638
3639 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
3640 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
3641 ("Access bit not set for usable segment. idx=%#x sel=%#x attr %#x\n", idxBase, pSelReg->Sel, pSelReg->Attr.u));
3642
3643 rc = VMXWriteVmcs32(idxAccess, u32Access); /* 32-bit guest segment access-rights field. */
3644 AssertRCReturn(rc, rc);
3645 return rc;
3646}
3647
3648
3649/**
3650 * Loads the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)
3651 * into the guest-state area in the VMCS.
3652 *
3653 * @returns VBox status code.
3654 * @param pVCpu Pointer to the VMCPU.
3656 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3657 * out-of-sync. Make sure to update the required fields
3658 * before using them.
3659 *
3660 * @remarks Requires CR0 (strict builds validation).
3661 * @remarks No-long-jump zone!!!
3662 */
3663static int hmR0VmxLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3664{
3665 int rc = VERR_INTERNAL_ERROR_5;
3666 PVM pVM = pVCpu->CTX_SUFF(pVM);
3667
3668 /*
3669 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
3670 */
3671 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)
3672 {
3673 /* Save the segment attributes for real-on-v86 mode hack, so we can restore them on VM-exit. */
3674 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3675 {
3676 pVCpu->hm.s.vmx.RealMode.uAttrCS.u = pMixedCtx->cs.Attr.u;
3677 pVCpu->hm.s.vmx.RealMode.uAttrSS.u = pMixedCtx->ss.Attr.u;
3678 pVCpu->hm.s.vmx.RealMode.uAttrDS.u = pMixedCtx->ds.Attr.u;
3679 pVCpu->hm.s.vmx.RealMode.uAttrES.u = pMixedCtx->es.Attr.u;
3680 pVCpu->hm.s.vmx.RealMode.uAttrFS.u = pMixedCtx->fs.Attr.u;
3681 pVCpu->hm.s.vmx.RealMode.uAttrGS.u = pMixedCtx->gs.Attr.u;
3682 }
3683
3684#ifdef VBOX_WITH_REM
3685 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
3686 {
3687 Assert(pVM->hm.s.vmx.pRealModeTSS);
3688 AssertCompile(PGMMODE_REAL < PGMMODE_PROTECTED);
3689 if ( pVCpu->hm.s.vmx.fWasInRealMode
3690 && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED)
3691 {
3692 /* Signal that the recompiler must flush its code-cache as the guest -may- rewrite code it will later execute
3693 in real-mode (e.g. OpenBSD 4.0) */
3694 REMFlushTBs(pVM);
3695 Log4(("Load: Switch to protected mode detected!\n"));
3696 pVCpu->hm.s.vmx.fWasInRealMode = false;
3697 }
3698 }
3699#endif
3700 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
3701 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs, pMixedCtx);
3702 AssertRCReturn(rc, rc);
3703 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
3704 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss, pMixedCtx);
3705 AssertRCReturn(rc, rc);
3706 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
3707 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds, pMixedCtx);
3708 AssertRCReturn(rc, rc);
3709 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
3710 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es, pMixedCtx);
3711 AssertRCReturn(rc, rc);
3712 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
3713 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs, pMixedCtx);
3714 AssertRCReturn(rc, rc);
3715 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
3716 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs, pMixedCtx);
3717 AssertRCReturn(rc, rc);
3718
3719 Log4(("Load: CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pMixedCtx->cs.Sel, pMixedCtx->cs.u64Base,
3720 pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u));
3721#ifdef VBOX_STRICT
3722 hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx);
3723#endif
3724 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SEGMENT_REGS;
3725 }
3726
3727 /*
3728 * Guest TR.
3729 */
3730 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)
3731 {
3732 /*
3733 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is achieved
3734 * using the interrupt redirection bitmap (all bits cleared to let the guest handle INT-n's) in the TSS.
3735 * See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
3736 */
3737 uint16_t u16Sel = 0;
3738 uint32_t u32Limit = 0;
3739 uint64_t u64Base = 0;
3740 uint32_t u32AccessRights = 0;
3741
3742 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3743 {
3744 u16Sel = pMixedCtx->tr.Sel;
3745 u32Limit = pMixedCtx->tr.u32Limit;
3746 u64Base = pMixedCtx->tr.u64Base;
3747 u32AccessRights = pMixedCtx->tr.Attr.u;
3748 }
3749 else
3750 {
3751 Assert(pVM->hm.s.vmx.pRealModeTSS);
3752 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMR3CanExecuteGuest() -XXX- what about inner loop changes? */
3753
3754 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
3755 RTGCPHYS GCPhys;
3756 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
3757 AssertRCReturn(rc, rc);
3758
3759 X86DESCATTR DescAttr;
3760 DescAttr.u = 0;
3761 DescAttr.n.u1Present = 1;
3762 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
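     /* The resulting access-rights value is 0x8b: type 0xb (busy 32-bit TSS) with the present bit (bit 7) set and
        all other attribute bits clear. */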
3763
3764 u16Sel = 0;
3765 u32Limit = HM_VTX_TSS_SIZE;
3766 u64Base = GCPhys; /* in real-mode phys = virt. */
3767 u32AccessRights = DescAttr.u;
3768 }
3769
3770 /* Validate. */
3771 Assert(!(u16Sel & RT_BIT(2)));
3772 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
3773 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
3774 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
3775 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
3776 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
3777 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
3778 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
3779 Assert( (u32Limit & 0xfff) == 0xfff
3780 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
3781 Assert( !(pMixedCtx->tr.u32Limit & 0xfff00000)
3782 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
3783
3784 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_TR, u16Sel); AssertRCReturn(rc, rc);
3785 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRCReturn(rc, rc);
3786 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRCReturn(rc, rc);
3787 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRCReturn(rc, rc);
3788
3789 Log4(("Load: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", u64Base));
3790 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_TR;
3791 }
3792
3793 /*
3794 * Guest GDTR.
3795 */
3796 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
3797 {
3798 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt); AssertRCReturn(rc, rc);
3799 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt); AssertRCReturn(rc, rc);
3800
3801 Assert(!(pMixedCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
3802 Log4(("Load: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pMixedCtx->gdtr.pGdt));
3803 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GDTR;
3804 }
3805
3806 /*
3807 * Guest LDTR.
3808 */
3809 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)
3810 {
3811 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
3812 uint32_t u32Access = 0;
3813 if (!pMixedCtx->ldtr.Attr.u)
3814 u32Access = X86DESCATTR_UNUSABLE;
3815 else
3816 u32Access = pMixedCtx->ldtr.Attr.u;
3817
3818 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR, pMixedCtx->ldtr.Sel); AssertRCReturn(rc, rc);
3819 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pMixedCtx->ldtr.u32Limit); AssertRCReturn(rc, rc);
3820 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pMixedCtx->ldtr.u64Base); AssertRCReturn(rc, rc);
3821 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRCReturn(rc, rc);
3822
3823 /* Validate. */
3824 if (!(u32Access & X86DESCATTR_UNUSABLE))
3825 {
3826 Assert(!(pMixedCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
3827 Assert(pMixedCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
3828 Assert(!pMixedCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
3829 Assert(pMixedCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
3830 Assert(!pMixedCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
3831 Assert(!(pMixedCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
3832 Assert( (pMixedCtx->ldtr.u32Limit & 0xfff) == 0xfff
3833 || !pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
3834 Assert( !(pMixedCtx->ldtr.u32Limit & 0xfff00000)
3835 || pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
3836 }
3837
3838 Log4(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pMixedCtx->ldtr.u64Base));
3839 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_LDTR;
3840 }
3841
3842 /*
3843 * Guest IDTR.
3844 */
3845 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
3846 {
3847 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt); AssertRCReturn(rc, rc);
3848 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt); AssertRCReturn(rc, rc);
3849
3850 Assert(!(pMixedCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
3851 Log4(("Load: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pMixedCtx->idtr.pIdt));
3852 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;
3853 }
3854
3855 return VINF_SUCCESS;
3856}
3857
3858
3859/**
3860 * Loads certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
3861 * areas. These MSRs will automatically be loaded to the host CPU on every
3862 * successful VM entry and stored from the host CPU on every successful VM exit.
3863 * Also loads the sysenter MSRs into the guest-state area in the VMCS.
3864 *
3865 * @returns VBox status code.
3866 * @param pVCpu Pointer to the VMCPU.
3867 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3868 * out-of-sync. Make sure to update the required fields
3869 * before using them.
3870 *
3871 * @remarks No-long-jump zone!!!
3872 */
3873static int hmR0VmxLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3874{
3875 AssertPtr(pVCpu);
3876 AssertPtr(pVCpu->hm.s.vmx.pvGuestMsr);
3877
3878 /*
3879 * MSRs covered by auto-load/store here: LSTAR, STAR, SF_MASK, KERNEL_GS_BASE and TSC_AUX (RDTSCP).
3880 */
3881 int rc = VINF_SUCCESS;
3882 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
3883 {
3884#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
3885 PVM pVM = pVCpu->CTX_SUFF(pVM);
3886 PVMXMSR pGuestMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
3887 uint32_t cGuestMsrs = 0;
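     /* Each entry in the auto-load/store MSR area is 16 bytes (see Intel spec. 24.7.2, 24.8.2): a 32-bit MSR index,
        32 reserved bits (MBZ) and the 64-bit MSR value; VMXMSR mirrors that layout. */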
3888
3889 /* See Intel spec. 4.1.4 "Enumeration of Paging Features by CPUID". */
3890 /** @todo r=ramshankar: Optimize this further to do lazy restoration and only
3891 * when the guest really is in 64-bit mode. */
3892 bool fSupportsLongMode = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
3893 if (fSupportsLongMode)
3894 {
3895 pGuestMsr->u32IndexMSR = MSR_K8_LSTAR;
3896 pGuestMsr->u32Reserved = 0;
3897 pGuestMsr->u64Value = pMixedCtx->msrLSTAR; /* 64-bit mode syscall rip */
3898 pGuestMsr++; cGuestMsrs++;
3899 pGuestMsr->u32IndexMSR = MSR_K6_STAR;
3900 pGuestMsr->u32Reserved = 0;
3901 pGuestMsr->u64Value = pMixedCtx->msrSTAR; /* legacy syscall eip, cs & ss */
3902 pGuestMsr++; cGuestMsrs++;
3903 pGuestMsr->u32IndexMSR = MSR_K8_SF_MASK;
3904 pGuestMsr->u32Reserved = 0;
3905 pGuestMsr->u64Value = pMixedCtx->msrSFMASK; /* syscall flag mask */
3906 pGuestMsr++; cGuestMsrs++;
3907 pGuestMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
3908 pGuestMsr->u32Reserved = 0;
3909 pGuestMsr->u64Value = pMixedCtx->msrKERNELGSBASE; /* swapgs exchange value */
3910 pGuestMsr++; cGuestMsrs++;
3911 }
3912
3913 /*
3914 * RDTSCP requires the TSC_AUX MSR. Host and guest share the physical MSR. So we have to
3915 * load the guest's copy if the guest can execute RDTSCP without causing VM-exits.
3916 */
3917 if ( CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP)
3918 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP))
3919 {
3920 pGuestMsr->u32IndexMSR = MSR_K8_TSC_AUX;
3921 pGuestMsr->u32Reserved = 0;
3922 rc = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pGuestMsr->u64Value);
3923 AssertRCReturn(rc, rc);
3924 pGuestMsr++; cGuestMsrs++;
3925 }
3926
3927 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
3928 if (cGuestMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc))
3929 {
3930 LogRel(("CPU autoload/store MSR count in VMCS exceeded cGuestMsrs=%u.\n", cGuestMsrs));
3931 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3932 }
3933
3934 /* Update the VCPU's copy of the guest MSR count. */
3935 pVCpu->hm.s.vmx.cGuestMsrs = cGuestMsrs;
3936 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cGuestMsrs); AssertRCReturn(rc, rc);
3937 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cGuestMsrs); AssertRCReturn(rc, rc);
3938#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
3939
3940 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_AUTO_MSRS;
3941 }
3942
3943 /*
3944 * Guest Sysenter MSRs.
3945 * These flags are only set when MSR-bitmaps are not supported by the CPU, in which case WRMSR
3946 * to these MSRs causes a VM-exit.
3947 */
3948 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
3949 {
3950 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs); AssertRCReturn(rc, rc);
3951 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_CS_MSR;
3952 }
3953 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
3954 {
3955 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip); AssertRCReturn(rc, rc);
3956 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR;
3957 }
3958 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
3959 {
3960 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp); AssertRCReturn(rc, rc);
3961 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR;
3962 }
3963
3964 return rc;
3965}
3966
3967
3968/**
3969 * Loads the guest activity state into the guest-state area in the VMCS.
3970 *
3971 * @returns VBox status code.
3972 * @param pVCpu Pointer to the VMCPU.
3973 * @param pCtx Pointer to the guest-CPU context. The data may be
3974 * out-of-sync. Make sure to update the required fields
3975 * before using them.
3976 *
3977 * @remarks No-long-jump zone!!!
3978 */
3979static int hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pCtx)
3980{
3981 /** @todo See if we can make use of other states, e.g.
3982 * VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT. */
3983 int rc = VINF_SUCCESS;
3984 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_ACTIVITY_STATE)
3985 {
3986 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
3987 AssertRCReturn(rc, rc);
3988 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_ACTIVITY_STATE;
3989 }
3990 return rc;
3991}
3992
3993
3994/**
3995 * Sets up the appropriate function to run guest code.
3996 *
3997 * @returns VBox status code.
3998 * @param pVCpu Pointer to the VMCPU.
3999 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4000 * out-of-sync. Make sure to update the required fields
4001 * before using them.
4002 *
4003 * @remarks No-long-jump zone!!!
4004 */
4005static int hmR0VmxSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4006{
4007 if (CPUMIsGuestInLongModeEx(pMixedCtx))
4008 {
4009#ifndef VBOX_ENABLE_64_BITS_GUESTS
4010 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
4011#endif
4012 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
4013#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4014 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
4015 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
4016#else
4017 /* 64-bit host or hybrid host. */
4018 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
4019#endif
4020 }
4021 else
4022 {
4023 /* Guest is not in long mode, use the 32-bit handler. */
4024 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4025 }
4026 Assert(pVCpu->hm.s.vmx.pfnStartVM);
4027 return VINF_SUCCESS;
4028}
4029
4030
4031/**
4032 * Wrapper for running the guest code in VT-x.
4033 *
4034 * @returns VBox strict status code.
4035 * @param pVM Pointer to the VM.
4036 * @param pVCpu Pointer to the VMCPU.
4037 * @param pCtx Pointer to the guest-CPU context.
4038 *
4039 * @remarks No-long-jump zone!!!
4040 */
4041DECLINLINE(int) hmR0VmxRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
4042{
4043 /*
4044 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
4045 * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved and thus the need for this XMM wrapper.
4046 * Refer to the MSDN docs "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details.
4047 */
4048#ifdef VBOX_WITH_KERNEL_USING_XMM
4049 return HMR0VMXStartVMWrapXMM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
4050#else
4051 return pVCpu->hm.s.vmx.pfnStartVM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
4052#endif
4053}
4054
4055
4056/**
4057 * Reports world-switch error and dumps some useful debug info.
4058 *
4059 * @param pVM Pointer to the VM.
4060 * @param pVCpu Pointer to the VMCPU.
4061 * @param rcVMRun The return code from VMLAUNCH/VMRESUME.
4062 * @param pCtx Pointer to the guest-CPU context.
4063 * @param pVmxTransient Pointer to the VMX transient structure (only
4064 * exitReason updated).
4065 */
4066static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
4067{
4068 Assert(pVM);
4069 Assert(pVCpu);
4070 Assert(pCtx);
4071 Assert(pVmxTransient);
4072 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4073
4074 Log4(("VM-entry failure: %Rrc\n", rcVMRun));
4075 switch (rcVMRun)
4076 {
4077 case VERR_VMX_INVALID_VMXON_PTR:
4078 AssertFailed();
4079 break;
4080 case VINF_SUCCESS: /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
4081 case VERR_VMX_UNABLE_TO_START_VM: /* VMLAUNCH/VMRESUME itself failed. */
4082 {
4083 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason);
4084 rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
4085 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
4086 AssertRC(rc);
4087
4088#ifdef VBOX_STRICT
4089 Log4(("uExitReason %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason,
4090 pVmxTransient->uExitReason));
4091 Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQualification));
4092 Log4(("InstrError %#RX32\n", pVCpu->hm.s.vmx.LastError.u32InstrError));
4093 if (pVCpu->hm.s.vmx.LastError.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
4094 Log4(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.LastError.u32InstrError]));
4095 else
4096 Log4(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
4097
4098 /* VMX control bits. */
4099 uint32_t u32Val;
4100 uint64_t u64Val;
4101 HMVMXHCUINTREG uHCReg;
4102 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val); AssertRC(rc);
4103 Log4(("VMX_VMCS32_CTRL_PIN_EXEC %#RX32\n", u32Val));
4104 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val); AssertRC(rc);
4105 Log4(("VMX_VMCS32_CTRL_PROC_EXEC %#RX32\n", u32Val));
4106 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val); AssertRC(rc);
4107 Log4(("VMX_VMCS32_CTRL_PROC_EXEC2 %#RX32\n", u32Val));
4108 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val); AssertRC(rc);
4109 Log4(("VMX_VMCS32_CTRL_ENTRY %#RX32\n", u32Val));
4110 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val); AssertRC(rc);
4111 Log4(("VMX_VMCS32_CTRL_EXIT %#RX32\n", u32Val));
4112 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val); AssertRC(rc);
4113 Log4(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
4114 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32Val); AssertRC(rc);
4115 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
4116 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &u32Val); AssertRC(rc);
4117 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
4118 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &u32Val); AssertRC(rc);
4119 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
4120 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, &u32Val); AssertRC(rc);
4121 Log4(("VMX_VMCS32_CTRL_TPR_THRESHOLD %u\n", u32Val));
4122 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &u32Val); AssertRC(rc);
4123 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
4124 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
4125 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
4126 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
4127 Log4(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
4128 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val); AssertRC(rc);
4129 Log4(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
4130 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, &u32Val); AssertRC(rc);
4131 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
4132 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, &u32Val); AssertRC(rc);
4133 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
4134 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
4135 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
4136 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
4137 Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RHr\n", uHCReg));
4138 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
4139 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
4140 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
4141 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
4142 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
4143 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
4144
4145 /* Guest bits. */
4146 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val); AssertRC(rc);
4147 Log4(("Old Guest Rip %#RX64 New %#RX64\n", pCtx->rip, u64Val));
4148 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val); AssertRC(rc);
4149 Log4(("Old Guest Rsp %#RX64 New %#RX64\n", pCtx->rsp, u64Val));
4150 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); AssertRC(rc);
4151 Log4(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
4152 rc = VMXReadVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, &u32Val); AssertRC(rc);
4153 Log4(("VMX_VMCS16_GUEST_FIELD_VPID %u\n", u32Val));
4154
4155 /* Host bits. */
4156 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR0, &uHCReg); AssertRC(rc);
4157 Log4(("Host CR0 %#RHr\n", uHCReg));
4158 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR3, &uHCReg); AssertRC(rc);
4159 Log4(("Host CR3 %#RHr\n", uHCReg));
4160 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR4, &uHCReg); AssertRC(rc);
4161 Log4(("Host CR4 %#RHr\n", uHCReg));
4162
4163 RTGDTR HostGdtr;
4164 PCX86DESCHC pDesc;
4165 ASMGetGDTR(&HostGdtr);
4166 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_CS, &u32Val); AssertRC(rc);
4167 Log4(("Host CS %#08x\n", u32Val));
4168 if (u32Val < HostGdtr.cbGdt)
4169 {
4170 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4171 HMR0DumpDescriptor(pDesc, u32Val, "CS: ");
4172 }
4173
4174 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_DS, &u32Val); AssertRC(rc);
4175 Log4(("Host DS %#08x\n", u32Val));
4176 if (u32Val < HostGdtr.cbGdt)
4177 {
4178 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4179 HMR0DumpDescriptor(pDesc, u32Val, "DS: ");
4180 }
4181
4182 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_ES, &u32Val); AssertRC(rc);
4183 Log4(("Host ES %#08x\n", u32Val));
4184 if (u32Val < HostGdtr.cbGdt)
4185 {
4186 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4187 HMR0DumpDescriptor(pDesc, u32Val, "ES: ");
4188 }
4189
4190 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_FS, &u32Val); AssertRC(rc);
4191 Log4(("Host FS %#08x\n", u32Val));
4192 if (u32Val < HostGdtr.cbGdt)
4193 {
4194 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4195 HMR0DumpDescriptor(pDesc, u32Val, "FS: ");
4196 }
4197
4198 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_GS, &u32Val); AssertRC(rc);
4199 Log4(("Host GS %#08x\n", u32Val));
4200 if (u32Val < HostGdtr.cbGdt)
4201 {
4202 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4203 HMR0DumpDescriptor(pDesc, u32Val, "GS: ");
4204 }
4205
4206 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_SS, &u32Val); AssertRC(rc);
4207 Log4(("Host SS %#08x\n", u32Val));
4208 if (u32Val < HostGdtr.cbGdt)
4209 {
4210 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4211 HMR0DumpDescriptor(pDesc, u32Val, "SS: ");
4212 }
4213
4214 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_TR, &u32Val); AssertRC(rc);
4215 Log4(("Host TR %#08x\n", u32Val));
4216 if (u32Val < HostGdtr.cbGdt)
4217 {
4218 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4219 HMR0DumpDescriptor(pDesc, u32Val, "TR: ");
4220 }
4221
4222 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_TR_BASE, &uHCReg); AssertRC(rc);
4223 Log4(("Host TR Base %#RHv\n", uHCReg));
4224 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, &uHCReg); AssertRC(rc);
4225 Log4(("Host GDTR Base %#RHv\n", uHCReg));
4226 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, &uHCReg); AssertRC(rc);
4227 Log4(("Host IDTR Base %#RHv\n", uHCReg));
4228 rc = VMXReadVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, &u32Val); AssertRC(rc);
4229 Log4(("Host SYSENTER CS %#08x\n", u32Val));
4230 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, &uHCReg); AssertRC(rc);
4231 Log4(("Host SYSENTER EIP %#RHv\n", uHCReg));
4232 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, &uHCReg); AssertRC(rc);
4233 Log4(("Host SYSENTER ESP %#RHv\n", uHCReg));
4234 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RSP, &uHCReg); AssertRC(rc);
4235 Log4(("Host RSP %#RHv\n", uHCReg));
4236 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg); AssertRC(rc);
4237 Log4(("Host RIP %#RHv\n", uHCReg));
4238# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4239 if (HMVMX_IS_64BIT_HOST_MODE())
4240 {
4241 Log4(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
4242 Log4(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
4243 Log4(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
4244 Log4(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
4245 Log4(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
4246 Log4(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
4247 }
4248# endif
4249#endif /* VBOX_STRICT */
4250 break;
4251 }
4252
4253 default:
4254 /* Impossible */
4255 AssertMsgFailed(("hmR0VmxReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
4256 break;
4257 }
4258 NOREF(pVM);
4259}
4260
4261
4262#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4263#ifndef VMX_USE_CACHED_VMCS_ACCESSES
4264# error "VMX_USE_CACHED_VMCS_ACCESSES not defined when it should be!"
4265#endif
4266#ifdef VBOX_STRICT
4267static bool hmR0VmxIsValidWriteField(uint32_t idxField)
4268{
4269 switch (idxField)
4270 {
4271 case VMX_VMCS_GUEST_RIP:
4272 case VMX_VMCS_GUEST_RSP:
4273 case VMX_VMCS_GUEST_SYSENTER_EIP:
4274 case VMX_VMCS_GUEST_SYSENTER_ESP:
4275 case VMX_VMCS_GUEST_GDTR_BASE:
4276 case VMX_VMCS_GUEST_IDTR_BASE:
4277 case VMX_VMCS_GUEST_CS_BASE:
4278 case VMX_VMCS_GUEST_DS_BASE:
4279 case VMX_VMCS_GUEST_ES_BASE:
4280 case VMX_VMCS_GUEST_FS_BASE:
4281 case VMX_VMCS_GUEST_GS_BASE:
4282 case VMX_VMCS_GUEST_SS_BASE:
4283 case VMX_VMCS_GUEST_LDTR_BASE:
4284 case VMX_VMCS_GUEST_TR_BASE:
4285 case VMX_VMCS_GUEST_CR3:
4286 return true;
4287 }
4288 return false;
4289}
4290
4291static bool hmR0VmxIsValidReadField(uint32_t idxField)
4292{
4293 switch (idxField)
4294 {
4295 /* Read-only fields. */
4296 case VMX_VMCS_RO_EXIT_QUALIFICATION:
4297 return true;
4298 }
4299 /* Remaining readable fields should also be writable. */
4300 return hmR0VmxIsValidWriteField(idxField);
4301}
4302#endif /* VBOX_STRICT */
4303
4304
4305/**
4306 * Executes the specified handler in 64-bit mode.
4307 *
4308 * @returns VBox status code.
4309 * @param pVM Pointer to the VM.
4310 * @param pVCpu Pointer to the VMCPU.
4311 * @param pCtx Pointer to the guest CPU context.
4312 * @param enmOp The operation to perform.
4313 * @param cbParam Number of 32-bit parameters in paParam.
4314 * @param paParam Array of 32-bit parameters.
4315 */
4316VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
4317 uint32_t *paParam)
4318{
4319 int rc, rc2;
4320 PHMGLOBLCPUINFO pCpu;
4321 RTHCPHYS HCPhysCpuPage;
4322 RTCCUINTREG uOldEFlags;
4323
4324 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
4325 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
4326 Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
4327 Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
4328
4329#ifdef VBOX_STRICT
4330 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries; i++)
4331 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i]));
4332
4333 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries; i++)
4334 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i]));
4335#endif
4336
4337 /* Disable interrupts. */
4338 uOldEFlags = ASMIntDisableFlags();
4339
4340#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
4341 RTCPUID idHostCpu = RTMpCpuId();
4342 CPUMR0SetLApic(pVM, idHostCpu);
4343#endif
4344
4345 pCpu = HMR0GetCurrentCpu();
4346 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
4347
4348 /* Clear the VMCS: mark it inactive, clear implementation-specific data and write the VMCS data back to memory. */
4349 VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
4350
4351 /* Leave VMX Root Mode. */
4352 VMXDisable();
4353
4354 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
4355
4356 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
4357 CPUMSetHyperEIP(pVCpu, enmOp);
4358 for (int i = (int)cbParam - 1; i >= 0; i--)
4359 CPUMPushHyper(pVCpu, paParam[i]);
4360
4361 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
4362
4363 /* Call the switcher. */
4364 rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
4365 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
4366
4367 /** @todo replace with hmR0VmxEnterRootMode() and hmR0VmxLeaveRootMode(). */
4368 /* Make sure the VMX instructions don't cause #UD faults. */
4369 ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);
4370
4371 /* Re-enter VMX Root Mode */
4372 rc2 = VMXEnable(HCPhysCpuPage);
4373 if (RT_FAILURE(rc2))
4374 {
4375 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
4376 ASMSetFlags(uOldEFlags);
4377 return rc2;
4378 }
4379
4380 rc2 = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
4381 AssertRC(rc2);
4382 Assert(!(ASMGetFlags() & X86_EFL_IF));
4383 ASMSetFlags(uOldEFlags);
4384 return rc;
4385}
4386
4387
4388/**
4389 * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts
4390 * supporting 64-bit guests.
4391 *
4392 * @returns VBox status code.
4393 * @param fResume Whether to VMLAUNCH or VMRESUME.
4394 * @param pCtx Pointer to the guest-CPU context.
4395 * @param pCache Pointer to the VMCS cache.
4396 * @param pVM Pointer to the VM.
4397 * @param pVCpu Pointer to the VMCPU.
4398 */
4399DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
4400{
4401 uint32_t aParam[6];
4402 PHMGLOBLCPUINFO pCpu = NULL;
4403 RTHCPHYS HCPhysCpuPage = 0;
4404 int rc = VERR_INTERNAL_ERROR_5;
4405
4406 pCpu = HMR0GetCurrentCpu();
4407 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
4408
4409#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4410 pCache->uPos = 1;
4411 pCache->interPD = PGMGetInterPaeCR3(pVM);
4412 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
4413#endif
4414
4415#ifdef VBOX_STRICT
4416 pCache->TestIn.HCPhysCpuPage = 0;
4417 pCache->TestIn.HCPhysVmcs = 0;
4418 pCache->TestIn.pCache = 0;
4419 pCache->TestOut.HCPhysVmcs = 0;
4420 pCache->TestOut.pCache = 0;
4421 pCache->TestOut.pCtx = 0;
4422 pCache->TestOut.eflags = 0;
4423#endif
4424
4425 aParam[0] = (uint32_t)(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */
4426 aParam[1] = (uint32_t)(HCPhysCpuPage >> 32); /* Param 1: VMXON physical address - Hi. */
4427 aParam[2] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */
4428 aParam[3] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs >> 32); /* Param 2: VMCS physical address - Hi. */
4429 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache);
4430 aParam[5] = 0;
4431
4432#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4433 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
4434 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
4435#endif
4436 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, 6, &aParam[0]);
4437
4438#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4439 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
4440 Assert(pCtx->dr[4] == 10);
4441 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
4442#endif
4443
4444#ifdef VBOX_STRICT
4445 AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
4446 AssertMsg(pCache->TestIn.HCPhysVmcs == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
4447 pVCpu->hm.s.vmx.HCPhysVmcs));
4448 AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
4449 pCache->TestOut.HCPhysVmcs));
4450 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
4451 pCache->TestOut.pCache));
4452 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache),
4453 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache)));
4454 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
4455 pCache->TestOut.pCtx));
4456 Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
4457#endif
4458 return rc;
4459}
4460
4461
4462/**
4463 * Initializes the VMCS-Read cache. The VMCS cache is used for 32-bit hosts
4464 * running 64-bit guests (except 32-bit Darwin which runs with 64-bit paging in
4465 * 32-bit mode) for 64-bit fields that cannot be accessed in 32-bit mode. Some
4466 * 64-bit fields -can- be accessed (those that have a 32-bit FULL & HIGH part).
4467 *
4468 * @returns VBox status code.
4469 * @param pVM Pointer to the VM.
4470 * @param pVCpu Pointer to the VMCPU.
4471 */
4472static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu)
4473{
4474#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField) \
4475{ \
4476 Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
4477 pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
4478 pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
4479 ++cReadFields; \
4480}
4481
4482 AssertPtr(pVM);
4483 AssertPtr(pVCpu);
4484 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
4485 uint32_t cReadFields = 0;
4486
4487 /*
4488 * Don't remove the #if 0'd fields in this code. They're listed here for consistency
4489 * and serve to indicate exceptions to the rules.
4490 */
4491
4492 /* Guest-natural selector base fields. */
4493#if 0
4494 /* These are 32-bit in practice. See Intel spec. 2.5 "Control Registers". */
4495 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR0);
4496 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR4);
4497#endif
4498 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_ES_BASE);
4499 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CS_BASE);
4500 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SS_BASE);
4501 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DS_BASE);
4502 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_FS_BASE);
4503 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GS_BASE);
4504 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_LDTR_BASE);
4505 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_TR_BASE);
4506 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GDTR_BASE);
4507 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_IDTR_BASE);
4508 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RSP);
4509 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RIP);
4510#if 0
4511 /* Unused natural width guest-state fields. */
4512 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS);
4513 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in Nested Paging case */
4514#endif
4515 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_ESP);
4516 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP);
4517
4518 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for these 64-bit fields (using "FULL" and "HIGH" fields). */
4519#if 0
4520 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL);
4521 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_DEBUGCTL_FULL);
4522 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PAT_FULL);
4523 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_EFER_FULL);
4524 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL);
4525 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE0_FULL);
4526 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE1_FULL);
4527 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE2_FULL);
4528 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE3_FULL);
4529#endif
4530
4531 /* Natural width guest-state fields. */
4532 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_QUALIFICATION);
4533#if 0
4534 /* Currently unused field. */
4535 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR);
4536#endif
4537
4538 if (pVM->hm.s.fNestedPaging)
4539 {
4540 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3);
4541 AssertMsg(cReadFields == VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields,
4542 VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX));
4543 pCache->Read.cValidEntries = VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX;
4544 }
4545 else
4546 {
4547 AssertMsg(cReadFields == VMX_VMCS_MAX_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields, VMX_VMCS_MAX_CACHE_IDX));
4548 pCache->Read.cValidEntries = VMX_VMCS_MAX_CACHE_IDX;
4549 }
4550
4551#undef VMXLOCAL_INIT_READ_CACHE_FIELD
4552 return VINF_SUCCESS;
4553}
4554
4555
4556/**
4557 * Writes a field into the VMCS. This can either directly invoke a VMWRITE or
4558 * queue up the VMWRITE by using the VMCS write cache (on 32-bit hosts, except
4559 * Darwin, running 64-bit guests).
4560 *
4561 * @returns VBox status code.
4562 * @param pVCpu Pointer to the VMCPU.
4563 * @param idxField The VMCS field encoding.
4564 * @param u64Val 16, 32 or 64-bit value.
4565 */
4566VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
4567{
4568 int rc;
4569 switch (idxField)
4570 {
4571 /*
4572 * These fields consist of a "FULL" and a "HIGH" part which can be written to individually.
4573 */
4574 /* 64-bit Control fields. */
4575 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
4576 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
4577 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
4578 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
4579 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
4580 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
4581 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
4582 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
4583 case VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL:
4584 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
4585 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
4586 case VMX_VMCS64_CTRL_EPTP_FULL:
4587 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
4588 /* 64-bit Guest-state fields. */
4589 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
4590 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
4591 case VMX_VMCS64_GUEST_PAT_FULL:
4592 case VMX_VMCS64_GUEST_EFER_FULL:
4593 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
4594 case VMX_VMCS64_GUEST_PDPTE0_FULL:
4595 case VMX_VMCS64_GUEST_PDPTE1_FULL:
4596 case VMX_VMCS64_GUEST_PDPTE2_FULL:
4597 case VMX_VMCS64_GUEST_PDPTE3_FULL:
4598 /* 64-bit Host-state fields. */
4599 case VMX_VMCS64_HOST_FIELD_PAT_FULL:
4600 case VMX_VMCS64_HOST_FIELD_EFER_FULL:
4601 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
4602 {
4603 rc = VMXWriteVmcs32(idxField, u64Val);
4604 rc |= VMXWriteVmcs32(idxField + 1, (uint32_t)(u64Val >> 32));
4605 break;
4606 }
4607
4608 /*
4609 * These fields do not have high and low parts. Queue up the VMWRITE by using the VMCS write-cache (for 64-bit
4610 * values). When we switch the host to 64-bit mode for running 64-bit guests, these VMWRITEs get executed then.
4611 */
4612 /* Natural-width Guest-state fields. */
4613 case VMX_VMCS_GUEST_CR3:
4614 case VMX_VMCS_GUEST_ES_BASE:
4615 case VMX_VMCS_GUEST_CS_BASE:
4616 case VMX_VMCS_GUEST_SS_BASE:
4617 case VMX_VMCS_GUEST_DS_BASE:
4618 case VMX_VMCS_GUEST_FS_BASE:
4619 case VMX_VMCS_GUEST_GS_BASE:
4620 case VMX_VMCS_GUEST_LDTR_BASE:
4621 case VMX_VMCS_GUEST_TR_BASE:
4622 case VMX_VMCS_GUEST_GDTR_BASE:
4623 case VMX_VMCS_GUEST_IDTR_BASE:
4624 case VMX_VMCS_GUEST_RSP:
4625 case VMX_VMCS_GUEST_RIP:
4626 case VMX_VMCS_GUEST_SYSENTER_ESP:
4627 case VMX_VMCS_GUEST_SYSENTER_EIP:
4628 {
4629 if (!(u64Val >> 32))
4630 {
4631 /* If this field is 64-bit, VT-x will zero out the top bits. */
4632 rc = VMXWriteVmcs32(idxField, (uint32_t)u64Val);
4633 }
4634 else
4635 {
4636 /* Assert that only the 32->64 switcher case should ever come here. */
4637 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);
4638 rc = VMXWriteCachedVmcsEx(pVCpu, idxField, u64Val);
4639 }
4640 break;
4641 }
4642
4643 default:
4644 {
4645 AssertMsgFailed(("VMXWriteVmcs64Ex: Invalid field %#RX32 (pVCpu=%p u64Val=%#RX64)\n", idxField, pVCpu, u64Val));
4646 rc = VERR_INVALID_PARAMETER;
4647 break;
4648 }
4649 }
4650 AssertRCReturn(rc, rc);
4651 return rc;
4652}
4653
4654
4655/**
4656 * Queues up a VMWRITE by using the VMCS write cache. This is only used on 32-bit
4657 * hosts (except Darwin) for 64-bit guests.
4658 *
4659 * @param pVCpu Pointer to the VMCPU.
4660 * @param idxField The VMCS field encoding.
4661 * @param u64Val 16, 32 or 64-bit value.
4662 */
4663VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
4664{
4665 AssertPtr(pVCpu);
4666 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
4667
4668 AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
4669 ("entries=%u\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
4670
4671 /* Make sure there are no duplicates. */
4672 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
4673 {
4674 if (pCache->Write.aField[i] == idxField)
4675 {
4676 pCache->Write.aFieldVal[i] = u64Val;
4677 return VINF_SUCCESS;
4678 }
4679 }
4680
4681 pCache->Write.aField[pCache->Write.cValidEntries] = idxField;
4682 pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
4683 pCache->Write.cValidEntries++;
4684 return VINF_SUCCESS;
4685}
4686
4687/* Enable later when the assembly code uses these as callbacks. */
4688#if 0
4689/*
4690 * Loads the VMCS write-cache into the CPU (by executing VMWRITEs).
4691 *
4692 * @param pVCpu Pointer to the VMCPU.
4693 * @param pCache Pointer to the VMCS cache.
4694 *
4695 * @remarks No-long-jump zone!!!
4696 */
4697VMMR0DECL(void) VMXWriteCachedVmcsLoad(PVMCPU pVCpu, PVMCSCACHE pCache)
4698{
4699 AssertPtr(pCache);
4700 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
4701 {
4702 int rc = VMXWriteVmcs64(pCache->Write.aField[i], pCache->Write.aFieldVal[i]);
4703 AssertRC(rc);
4704 }
4705 pCache->Write.cValidEntries = 0;
4706}
4707
4708
4709/**
4710 * Stores the VMCS read-cache from the CPU (by executing VMREADs).
4711 *
4712 * @param pVCpu Pointer to the VMCPU.
4713 * @param pCache Pointer to the VMCS cache.
4714 *
4715 * @remarks No-long-jump zone!!!
4716 */
4717VMMR0DECL(void) VMXReadCachedVmcsStore(PVMCPU pVCpu, PVMCSCACHE pCache)
4718{
4719 AssertPtr(pCache);
4720 for (uint32_t i = 0; i < pCache->Read.cValidEntries; i++)
4721 {
4722 int rc = VMXReadVmcs64(pCache->Read.aField[i], &pCache->Read.aFieldVal[i]);
4723 AssertRC(rc);
4724 }
4725}
4726#endif
4727#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
4728
4729
4730/**
4731 * Sets up the usage of TSC-offsetting and updates the VMCS. If offsetting is
4732 * not possible, causes VM-exits on RDTSC(P)s. Also sets up the VMX preemption
4733 * timer.
4734 *
4735 * @returns VBox status code.
4736 * @param pVCpu Pointer to the VMCPU.
4737 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4738 * out-of-sync. Make sure to update the required fields
4739 * before using them.
4740 * @remarks No-long-jump zone!!!
4741 */
4742static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4743{
4744 int rc = VERR_INTERNAL_ERROR_5;
4745 bool fOffsettedTsc = false;
4746 PVM pVM = pVCpu->CTX_SUFF(pVM);
4747 if (pVM->hm.s.vmx.fUsePreemptTimer)
4748 {
4749 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &pVCpu->hm.s.vmx.u64TSCOffset);
4750
4751 /* Make sure the returned values have sane upper and lower boundaries. */
4752 uint64_t u64CpuHz = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
4753 cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64th of a second */
4754 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
4755 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
4756
4757 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
4758 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount); AssertRC(rc);
4759 }
4760 else
4761 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset);
4762
4763 if (fOffsettedTsc)
4764 {
4765 uint64_t u64CurTSC = ASMReadTSC();
4766 if (u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
4767 {
4768 /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
4769 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset); AssertRC(rc);
4770
4771 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
4772 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4773 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
4774 }
4775 else
4776 {
4777 /* VM-exit on RDTSC(P) as we would otherwise pass decreasing TSC values to the guest. */
4778 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
4779 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4780 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow);
4781 }
4782 }
4783 else
4784 {
4785 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
4786 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
4787 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4788 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
4789 }
4790}
4791
4792
4793/**
4794 * Determines if an exception is a contributory exception. Contributory
4795 * exceptions are ones which can cause double-faults. Page-fault is
4796 * intentionally not included here as it's a conditional contributory exception.
4797 *
4798 * @returns true if the exception is contributory, false otherwise.
4799 * @param uVector The exception vector.
4800 */
4801DECLINLINE(bool) hmR0VmxIsContributoryXcpt(const uint32_t uVector)
4802{
4803 switch (uVector)
4804 {
4805 case X86_XCPT_GP:
4806 case X86_XCPT_SS:
4807 case X86_XCPT_NP:
4808 case X86_XCPT_TS:
4809 case X86_XCPT_DE:
4810 return true;
4811 default:
4812 break;
4813 }
4814 return false;
4815}
4816
4817
4818/**
4819 * Sets an event as a pending event to be injected into the guest.
4820 *
4821 * @param pVCpu Pointer to the VMCPU.
4822 * @param u32IntrInfo The VM-entry interruption-information field.
4823 * @param cbInstr The VM-entry instruction length in bytes (for software
4824 * interrupts, exceptions and privileged software
4825 * exceptions).
4826 * @param u32ErrCode The VM-entry exception error code.
4827 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
4828 * page-fault.
4829 *
4830 * @remarks Statistics counter assumes this is a guest event being injected or
4831 * re-injected into the guest, i.e. 'StatInjectPendingReflect' is
4832 * always incremented.
4833 */
4834DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntrInfo, uint32_t cbInstr, uint32_t u32ErrCode,
4835 RTGCUINTPTR GCPtrFaultAddress)
4836{
4837 Assert(!pVCpu->hm.s.Event.fPending);
4838 pVCpu->hm.s.Event.fPending = true;
4839 pVCpu->hm.s.Event.u64IntrInfo = u32IntrInfo;
4840 pVCpu->hm.s.Event.u32ErrCode = u32ErrCode;
4841 pVCpu->hm.s.Event.cbInstr = cbInstr;
4842 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
4843
4844 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
4845}
4846
4847
4848/**
4849 * Sets a double-fault (#DF) exception as pending-for-injection into the VM.
4850 *
4851 * @param pVCpu Pointer to the VMCPU.
4852 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4853 * out-of-sync. Make sure to update the required fields
4854 * before using them.
4855 */
4856DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4857{
4858 uint32_t u32IntrInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
4859 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
4860 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
4861 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
4862}
4863
4864
4865/**
4866 * Handles a condition that occurred while delivering an event through the guest
4867 * IDT.
4868 *
4869 * @returns VBox status code (informational status codes included).
4870 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
4871 * @retval VINF_HM_DOUBLE_FAULT if a #DF condition was detected and we ought to
4872 * continue execution of the guest, which will deliver the #DF.
4873 * @retval VINF_EM_RESET if we detected a triple-fault condition.
4874 *
4875 * @param pVCpu Pointer to the VMCPU.
4876 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4877 * out-of-sync. Make sure to update the required fields
4878 * before using them.
4879 * @param pVmxTransient Pointer to the VMX transient structure.
4880 *
4881 * @remarks No-long-jump zone!!!
4882 */
4883static int hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
4884{
4885 int rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
4886 AssertRC(rc);
4887 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
4888 {
4889 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
4890 AssertRCReturn(rc, rc);
4891
4892 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
4893 uint32_t uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntrInfo);
4894 uint32_t uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
4895
4896 typedef enum
4897 {
4898 VMXREFLECTXCPT_XCPT, /* Reflect the exception to the guest or for further evaluation by VMM. */
4899 VMXREFLECTXCPT_DF, /* Reflect the exception as a double-fault to the guest. */
4900 VMXREFLECTXCPT_TF, /* Indicate a triple faulted state to the VMM. */
4901 VMXREFLECTXCPT_NONE /* Nothing to reflect. */
4902 } VMXREFLECTXCPT;
4903
4904 /* See Intel spec. 30.7.1.1 "Reflecting Exceptions to Guest Software". */
4905 VMXREFLECTXCPT enmReflect = VMXREFLECTXCPT_NONE;
4906 if (VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntrInfo))
4907 {
4908 if (uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT)
4909 {
4910 enmReflect = VMXREFLECTXCPT_XCPT;
4911#ifdef VBOX_STRICT
4912 if ( hmR0VmxIsContributoryXcpt(uIdtVector)
4913 && uExitVector == X86_XCPT_PF)
4914 {
4915 Log4(("IDT: vcpu[%RU32] Contributory #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
4916 }
4917#endif
4918 if ( uExitVector == X86_XCPT_PF
4919 && uIdtVector == X86_XCPT_PF)
4920 {
4921 pVmxTransient->fVectoringPF = true;
4922 Log4(("IDT: vcpu[%RU32] Vectoring #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
4923 }
4924 else if ( (pVCpu->hm.s.vmx.u32XcptBitmap & HMVMX_CONTRIBUTORY_XCPT_MASK)
4925 && hmR0VmxIsContributoryXcpt(uExitVector)
4926 && ( hmR0VmxIsContributoryXcpt(uIdtVector)
4927 || uIdtVector == X86_XCPT_PF))
4928 {
4929 enmReflect = VMXREFLECTXCPT_DF;
4930 }
4931 else if (uIdtVector == X86_XCPT_DF)
4932 enmReflect = VMXREFLECTXCPT_TF;
4933 }
4934 else if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
4935 || uIntType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT
4936 || uIntType == VMX_IDT_VECTORING_INFO_TYPE_NMI)
4937 {
4938 /*
4939 * Ignore software interrupts (INT n), software exceptions (#BP, #OF) and privileged software exceptions
4940 * (whatever they are) as they reoccur when restarting the instruction.
4941 */
4942 enmReflect = VMXREFLECTXCPT_XCPT;
4943 }
4944 }
4945 else
4946 {
4947 /*
4948 * If event delivery caused an EPT violation/misconfig or APIC access VM-exit, then the VM-exit
4949 * interruption-information will not be valid and we end up here. In such cases, it is sufficient to reflect the
4950 * original exception to the guest after handling the VM-exit.
4951 */
4952 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
4953 || uIntType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT
4954 || uIntType == VMX_IDT_VECTORING_INFO_TYPE_NMI)
4955 {
4956 enmReflect = VMXREFLECTXCPT_XCPT;
4957 }
4958 }
4959
4960 switch (enmReflect)
4961 {
4962 case VMXREFLECTXCPT_XCPT:
4963 {
4964 Assert( uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4965 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
4966 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT);
4967
4968 uint32_t u32ErrCode = 0;
4969 if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo))
4970 {
4971 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
4972 AssertRCReturn(rc, rc);
4973 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
4974 }
4975
4976 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF. See hmR0VmxExitXcptPF(). */
4977 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INTR_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
4978 0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2);
4979 rc = VINF_SUCCESS;
4980 Log4(("IDT: vcpu[%RU32] Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->idCpu,
4981 pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.u32ErrCode));
4982
4983 break;
4984 }
4985
4986 case VMXREFLECTXCPT_DF:
4987 {
4988 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
4989 rc = VINF_HM_DOUBLE_FAULT;
4990 Log4(("IDT: vcpu[%RU32] Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->idCpu,
4991 pVCpu->hm.s.Event.u64IntrInfo, uIdtVector, uExitVector));
4992
4993 break;
4994 }
4995
4996 case VMXREFLECTXCPT_TF:
4997 {
4998 rc = VINF_EM_RESET;
4999 Log4(("IDT: vcpu[%RU32] Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", pVCpu->idCpu, uIdtVector,
5000 uExitVector));
5001 break;
5002 }
5003
5004 default:
5005 Assert(rc == VINF_SUCCESS);
5006 break;
5007 }
5008 }
5009 Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET);
5010 return rc;
5011}
5012
5013
5014/**
5015 * Saves the guest's CR0 register from the VMCS into the guest-CPU context.
5016 *
5017 * @returns VBox status code.
5018 * @param pVCpu Pointer to the VMCPU.
5019 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5020 * out-of-sync. Make sure to update the required fields
5021 * before using them.
5022 *
5023 * @remarks No-long-jump zone!!!
5024 */
5025static int hmR0VmxSaveGuestCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5026{
5027 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0))
5028 {
5029 uint32_t uVal = 0;
5030 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uVal);
5031 AssertRCReturn(rc, rc);
5032 uint32_t uShadow = 0;
5033 rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow);
5034 AssertRCReturn(rc, rc);
5035
5036 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR0Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR0Mask);
5037 CPUMSetGuestCR0(pVCpu, uVal);
5038 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR0;
5039 }
5040 return VINF_SUCCESS;
5041}
5042
5043
5044/**
5045 * Saves the guest's CR4 register from the VMCS into the guest-CPU context.
5046 *
5047 * @returns VBox status code.
5048 * @param pVCpu Pointer to the VMCPU.
5049 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5050 * out-of-sync. Make sure to update the required fields
5051 * before using them.
5052 *
5053 * @remarks No-long-jump zone!!!
5054 */
5055static int hmR0VmxSaveGuestCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5056{
5057 int rc = VINF_SUCCESS;
5058 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR4))
5059 {
5060 uint32_t uVal = 0;
5061 uint32_t uShadow = 0;
5062 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uVal);
5063 AssertRCReturn(rc, rc);
5064 rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow);
5065 AssertRCReturn(rc, rc);
5066
5067 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR4Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR4Mask);
5068 CPUMSetGuestCR4(pVCpu, uVal);
5069 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR4;
5070 }
5071 return rc;
5072}
5073
5074
5075/**
5076 * Saves the guest's RIP register from the VMCS into the guest-CPU context.
5077 *
5078 * @returns VBox status code.
5079 * @param pVCpu Pointer to the VMCPU.
5080 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5081 * out-of-sync. Make sure to update the required fields
5082 * before using them.
5083 *
5084 * @remarks No-long-jump zone!!!
5085 */
5086static int hmR0VmxSaveGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5087{
5088 int rc = VINF_SUCCESS;
5089 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP))
5090 {
5091 uint64_t u64Val = 0;
5092 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);
5093 AssertRCReturn(rc, rc);
5094
5095 pMixedCtx->rip = u64Val;
5096 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RIP;
5097 }
5098 return rc;
5099}
5100
5101
5102/**
5103 * Saves the guest's RSP register from the VMCS into the guest-CPU context.
5104 *
5105 * @returns VBox status code.
5106 * @param pVCpu Pointer to the VMCPU.
5107 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5108 * out-of-sync. Make sure to update the required fields
5109 * before using them.
5110 *
5111 * @remarks No-long-jump zone!!!
5112 */
5113static int hmR0VmxSaveGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5114{
5115 int rc = VINF_SUCCESS;
5116 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RSP))
5117 {
5118 uint64_t u64Val = 0;
5119 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);
5120 AssertRCReturn(rc, rc);
5121
5122 pMixedCtx->rsp = u64Val;
5123 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RSP;
5124 }
5125 return rc;
5126}
5127
5128
5129/**
5130 * Saves the guest's RFLAGS from the VMCS into the guest-CPU context.
5131 *
5132 * @returns VBox status code.
5133 * @param pVCpu Pointer to the VMCPU.
5134 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5135 * out-of-sync. Make sure to update the required fields
5136 * before using them.
5137 *
5138 * @remarks No-long-jump zone!!!
5139 */
5140static int hmR0VmxSaveGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5141{
5142 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS))
5143 {
5144 uint32_t uVal = 0;
5145 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &uVal);
5146 AssertRCReturn(rc, rc);
5147
5148 pMixedCtx->eflags.u32 = uVal;
5149 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) /* Undo our real-on-v86-mode changes to eflags if necessary. */
5150 {
5151 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
5152 Log4(("Saving real-mode EFLAGS VT-x view=%#RX32\n", pMixedCtx->eflags.u32));
5153
5154 pMixedCtx->eflags.Bits.u1VM = 0;
5155 pMixedCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.eflags.Bits.u2IOPL;
5156 }
5157
5158 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RFLAGS;
5159 }
5160 return VINF_SUCCESS;
5161}
5162
5163
5164/**
5165 * Wrapper for saving the guest's RIP, RSP and RFLAGS from the VMCS into the
5166 * guest-CPU context.
5167 */
5168DECLINLINE(int) hmR0VmxSaveGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5169{
5170 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
5171 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
5172 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
5173 return rc;
5174}
5175
5176
5177/**
5178 * Saves the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
5179 * from the guest-state area in the VMCS.
5180 *
5181 * @param pVCpu Pointer to the VMCPU.
5182 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5183 * out-of-sync. Make sure to update the required fields
5184 * before using them.
5185 *
5186 * @remarks No-long-jump zone!!!
5187 */
5188static void hmR0VmxSaveGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5189{
5190 uint32_t uIntrState = 0;
5191 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
5192 AssertRC(rc);
5193
5194 if (!uIntrState)
5195 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
5196 else
5197 {
5198 Assert( uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
5199 || uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
5200 rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
5201 AssertRC(rc);
5202 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* for hmR0VmxGetGuestIntrState(). */
5203 AssertRC(rc);
5204
5205 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
5206 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
5207 }
5208}
5209
5210
5211/**
5212 * Saves the guest's activity state.
5213 *
5214 * @returns VBox status code.
5215 * @param pVCpu Pointer to the VMCPU.
5216 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5217 * out-of-sync. Make sure to update the required fields
5218 * before using them.
5219 *
5220 * @remarks No-long-jump zone!!!
5221 */
5222static int hmR0VmxSaveGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5223{
5224 /* Nothing to do for now until we make use of different guest-CPU activity state. Just update the flag. */
5225 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_ACTIVITY_STATE;
5226 return VINF_SUCCESS;
5227}
5228
5229
5230/**
5231 * Saves the guest SYSENTER MSRs (SYSENTER_CS, SYSENTER_EIP, SYSENTER_ESP) from
5232 * the current VMCS into the guest-CPU context.
5233 *
5234 * @returns VBox status code.
5235 * @param pVCpu Pointer to the VMCPU.
5236 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5237 * out-of-sync. Make sure to update the required fields
5238 * before using them.
5239 *
5240 * @remarks No-long-jump zone!!!
5241 */
5242static int hmR0VmxSaveGuestSysenterMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5243{
5244 int rc = VINF_SUCCESS;
5245 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR))
5246 {
5247 uint32_t u32Val = 0;
5248 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRCReturn(rc, rc);
5249 pMixedCtx->SysEnter.cs = u32Val;
5250 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR;
5251 }
5252
5253 uint64_t u64Val = 0;
5254 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR))
5255 {
5256 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &u64Val); AssertRCReturn(rc, rc);
5257 pMixedCtx->SysEnter.eip = u64Val;
5258 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR;
5259 }
5260 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR))
5261 {
5262 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &u64Val); AssertRCReturn(rc, rc);
5263 pMixedCtx->SysEnter.esp = u64Val;
5264 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR;
5265 }
5266 return rc;
5267}
5268
5269
5270/**
5271 * Saves the guest FS_BASE MSR from the current VMCS into the guest-CPU
5272 * context.
5273 *
5274 * @returns VBox status code.
5275 * @param pVCpu Pointer to the VMCPU.
5276 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5277 * out-of-sync. Make sure to update the required fields
5278 * before using them.
5279 *
5280 * @remarks No-long-jump zone!!!
5281 */
5282static int hmR0VmxSaveGuestFSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5283{
5284 int rc = VINF_SUCCESS;
5285 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_FS_BASE_MSR))
5286 {
5287 uint64_t u64Val = 0;
5288 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_FS_BASE, &u64Val); AssertRCReturn(rc, rc);
5289 pMixedCtx->fs.u64Base = u64Val;
5290 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_FS_BASE_MSR;
5291 }
5292 return rc;
5293}
5294
5295
5296/**
5297 * Saves the guest GS_BASE MSR from the current VMCS into the guest-CPU
5298 * context.
5299 *
5300 * @returns VBox status code.
5301 * @param pVCpu Pointer to the VMCPU.
5302 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5303 * out-of-sync. Make sure to update the required fields
5304 * before using them.
5305 *
5306 * @remarks No-long-jump zone!!!
5307 */
5308static int hmR0VmxSaveGuestGSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5309{
5310 int rc = VINF_SUCCESS;
5311 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_GS_BASE_MSR))
5312 {
5313 uint64_t u64Val = 0;
5314 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GS_BASE, &u64Val); AssertRCReturn(rc, rc);
5315 pMixedCtx->gs.u64Base = u64Val;
5316 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_GS_BASE_MSR;
5317 }
5318 return rc;
5319}
5320
5321
5322/**
5323 * Saves the auto load/store'd guest MSRs from the current VMCS into the
5324 * guest-CPU context. Currently these are LSTAR, STAR, SFMASK, KERNEL-GS BASE
5325 * and TSC_AUX.
5326 *
5327 * @returns VBox status code.
5328 * @param pVCpu Pointer to the VMCPU.
5329 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5330 * out-of-sync. Make sure to update the required fields
5331 * before using them.
5332 *
5333 * @remarks No-long-jump zone!!!
5334 */
5335static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5336{
5337 if (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS)
5338 return VINF_SUCCESS;
5339
5340#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
5341 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cGuestMsrs; i++)
5342 {
5343 PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
5344 pMsr += i;
5345 switch (pMsr->u32IndexMSR)
5346 {
5347 case MSR_K8_LSTAR: pMixedCtx->msrLSTAR = pMsr->u64Value; break;
5348 case MSR_K6_STAR: pMixedCtx->msrSTAR = pMsr->u64Value; break;
5349 case MSR_K8_SF_MASK: pMixedCtx->msrSFMASK = pMsr->u64Value; break;
5350 case MSR_K8_TSC_AUX: CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value); break;
5351 case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value; break;
5352 case MSR_K6_EFER: /* EFER can't be changed without causing a VM-exit. */ break;
5353 default:
5354 {
5355 AssertFailed();
5356 return VERR_HM_UNEXPECTED_LD_ST_MSR;
5357 }
5358 }
5359 }
5360#endif
5361
5362 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS;
5363 return VINF_SUCCESS;
5364}
5365
5366
5367/**
5368 * Saves the guest control registers from the current VMCS into the guest-CPU
5369 * context.
5370 *
5371 * @returns VBox status code.
5372 * @param pVCpu Pointer to the VMCPU.
5373 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5374 * out-of-sync. Make sure to update the required fields
5375 * before using them.
5376 *
5377 * @remarks No-long-jump zone!!!
5378 */
5379static int hmR0VmxSaveGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5380{
5381 /* Guest CR0. Guest FPU. */
5382 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
5383 AssertRCReturn(rc, rc);
5384
5385 /* Guest CR4. */
5386 rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
5387 AssertRCReturn(rc, rc);
5388
5389 /* Guest CR2 - always updated during the world-switch or in #PF. */
5390 /* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */
5391 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR3))
5392 {
5393 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0);
5394 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR4);
5395
5396 PVM pVM = pVCpu->CTX_SUFF(pVM);
5397 if ( pVM->hm.s.vmx.fUnrestrictedGuest
5398 || ( pVM->hm.s.fNestedPaging
5399 && CPUMIsGuestPagingEnabledEx(pMixedCtx)))
5400 {
5401 uint64_t u64Val = 0;
5402 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);
5403 if (pMixedCtx->cr3 != u64Val)
5404 {
5405 CPUMSetGuestCR3(pVCpu, u64Val);
5406 if (VMMRZCallRing3IsEnabled(pVCpu))
5407 {
5408 PGMUpdateCR3(pVCpu, u64Val);
5409 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
5410 }
5411 else
5412 {
5413 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3().*/
5414 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
5415 }
5416 }
5417
5418 /* If the guest is in PAE mode, sync back the PDPEs into the guest state. */
5419 if (CPUMIsGuestInPAEModeEx(pMixedCtx)) /* Reads CR0, CR4 and EFER MSR (EFER is always up-to-date). */
5420 {
5421 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
5422 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
5423 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
5424 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
5425
5426 if (VMMRZCallRing3IsEnabled(pVCpu))
5427 {
5428 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
5429 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
5430 }
5431 else
5432 {
5433 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */
5434 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
5435 }
5436 }
5437 }
5438
5439 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR3;
5440 }
5441
5442 /*
5443 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback()
5444 * -> VMMRZCallRing3Disable() -> hmR0VmxSaveGuestState() -> Set VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
5445 * -> continue with VM-exit handling -> hmR0VmxSaveGuestControlRegs() and here we are.
5446 *
5447 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
5448 */
5449 if (VMMRZCallRing3IsEnabled(pVCpu))
5450 {
5451 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
5452 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
5453
5454 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
5455 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
5456
5457 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
5458 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
5459 }
5460
5461 return rc;
5462}
5463
5464
5465/**
5466 * Reads a guest segment register from the current VMCS into the guest-CPU
5467 * context.
5468 *
5469 * @returns VBox status code.
5470 * @param pVCpu Pointer to the VMCPU.
5471 * @param idxSel Index of the selector in the VMCS.
5472 * @param idxLimit Index of the segment limit in the VMCS.
5473 * @param idxBase Index of the segment base in the VMCS.
5474 * @param idxAccess Index of the access rights of the segment in the VMCS.
5475 * @param pSelReg Pointer to the segment selector.
5476 *
5477 * @remarks No-long-jump zone!!!
5478 * @remarks Never call this function directly!!! Use the VMXLOCAL_READ_SEG()
5479 * macro as that takes care of whether to read from the VMCS cache or
5480 * not.
5481 */
5482DECLINLINE(int) hmR0VmxReadSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
5483 PCPUMSELREG pSelReg)
5484{
5485 uint32_t u32Val = 0;
5486 int rc = VMXReadVmcs32(idxSel, &u32Val);
5487 AssertRCReturn(rc, rc);
5488 pSelReg->Sel = (uint16_t)u32Val;
5489 pSelReg->ValidSel = (uint16_t)u32Val;
5490 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
5491
5492 rc = VMXReadVmcs32(idxLimit, &u32Val);
5493 AssertRCReturn(rc, rc);
5494 pSelReg->u32Limit = u32Val;
5495
5496 uint64_t u64Val = 0;
5497 rc = VMXReadVmcsGstNByIdxVal(idxBase, &u64Val);
5498 AssertRCReturn(rc, rc);
5499 pSelReg->u64Base = u64Val;
5500
5501 rc = VMXReadVmcs32(idxAccess, &u32Val);
5502 AssertRCReturn(rc, rc);
5503 pSelReg->Attr.u = u32Val;
5504
5505 /*
5506 * If VT-x marks the segment as unusable, the rest of the attributes are undefined with certain exceptions (some bits in
5507 * CS, SS). Regardless, we have to clear the bits here and retain only the unusable bit, because the unusable bit is
5508 * specific to VT-x; everyone else relies on the attributes being zero and has no clue what the unusable bit is.
5509 *
5510 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
5511 *
5512 * bird: This isn't quite as simple. VT-x and VBox(!) require the DPL of SS to be the same as the CPL. In 64-bit mode it
5513 * is possible (int/trap/xxx injection does this when switching rings) to load SS with a NULL selector and RPL=CPL.
5514 * The Attr.u = X86DESCATTR_UNUSABLE works fine as long as nobody uses ring-1 or ring-2. Since VT-x updates the DPL
5515 * correctly in the attributes of SS even when the unusable bit is set, we need to preserve the DPL or we get
5516 * invalid-guest-state trouble. Try bs2-cpu-hidden-regs-1.
5517 */
5518 if (pSelReg->Attr.u & X86DESCATTR_UNUSABLE)
5519 {
5520 Assert(idxSel != VMX_VMCS16_GUEST_FIELD_TR); /* TR is the only selector that can never be unusable. */
5521 Log4(("hmR0VmxReadSegmentReg: Unusable idxSel=%#x attr=%#x\n", idxSel, pSelReg->Attr.u));
5522
5523 if (idxSel == VMX_VMCS16_GUEST_FIELD_SS)
5524 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_DPL;
5525 else if (idxSel == VMX_VMCS16_GUEST_FIELD_CS)
5526 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G;
5527 else
5528 pSelReg->Attr.u = X86DESCATTR_UNUSABLE;
5529 }
5530 return VINF_SUCCESS;
5531}
5532
5533
5534#ifdef VMX_USE_CACHED_VMCS_ACCESSES
5535# define VMXLOCAL_READ_SEG(Sel, CtxSel) \
5536 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
5537 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
5538#else
5539# define VMXLOCAL_READ_SEG(Sel, CtxSel) \
5540 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
5541 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
5542#endif
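
/*
 * Illustrative expansion (not part of the build): in the non-cached configuration
 * VMXLOCAL_READ_SEG(CS, cs) pastes into
 *
 *     hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT,
 *                           VMX_VMCS_GUEST_CS_BASE, VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs);
 *
 * whereas the cached variant passes VMX_VMCS_GUEST_CS_BASE_CACHE_IDX as the base index,
 * which is how the macro takes care of reading the base via the VMCS read cache.
 */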
5543
5544
5545/**
5546 * Saves the guest segment registers from the current VMCS into the guest-CPU
5547 * context.
5548 *
5549 * @returns VBox status code.
5550 * @param pVCpu Pointer to the VMCPU.
5551 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5552 * out-of-sync. Make sure to update the required fields
5553 * before using them.
5554 *
5555 * @remarks No-long-jump zone!!!
5556 */
5557static int hmR0VmxSaveGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5558{
5559 /* Guest segment registers. */
5560 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SEGMENT_REGS))
5561 {
5562 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); AssertRCReturn(rc, rc);
5563 rc = VMXLOCAL_READ_SEG(CS, cs); AssertRCReturn(rc, rc);
5564 rc = VMXLOCAL_READ_SEG(SS, ss); AssertRCReturn(rc, rc);
5565 rc = VMXLOCAL_READ_SEG(DS, ds); AssertRCReturn(rc, rc);
5566 rc = VMXLOCAL_READ_SEG(ES, es); AssertRCReturn(rc, rc);
5567 rc = VMXLOCAL_READ_SEG(FS, fs); AssertRCReturn(rc, rc);
5568 rc = VMXLOCAL_READ_SEG(GS, gs); AssertRCReturn(rc, rc);
5569
5570 /* Restore segment attributes for real-on-v86 mode hack. */
5571 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
5572 {
5573 pMixedCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrCS.u;
5574 pMixedCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrSS.u;
5575 pMixedCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrDS.u;
5576 pMixedCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrES.u;
5577 pMixedCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrFS.u;
5578 pMixedCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrGS.u;
5579 }
5580 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SEGMENT_REGS;
5581 }
5582
5583 return VINF_SUCCESS;
5584}
5585
5586
5587/**
5588 * Saves the guest descriptor table registers and task register from the current
5589 * VMCS into the guest-CPU context.
5590 *
5591 * @returns VBox status code.
5592 * @param pVCpu Pointer to the VMCPU.
5593 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5594 * out-of-sync. Make sure to update the required fields
5595 * before using them.
5596 *
5597 * @remarks No-long-jump zone!!!
5598 */
5599static int hmR0VmxSaveGuestTableRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5600{
5601 int rc = VINF_SUCCESS;
5602
5603 /* Guest LDTR. */
5604 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_LDTR))
5605 {
5606 rc = VMXLOCAL_READ_SEG(LDTR, ldtr);
5607 AssertRCReturn(rc, rc);
5608 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_LDTR;
5609 }
5610
5611 /* Guest GDTR. */
5612 uint64_t u64Val = 0;
5613 uint32_t u32Val = 0;
5614 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_GDTR))
5615 {
5616 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val); AssertRCReturn(rc, rc);
5617 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
5618 pMixedCtx->gdtr.pGdt = u64Val;
5619 pMixedCtx->gdtr.cbGdt = u32Val;
5620 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_GDTR;
5621 }
5622
5623 /* Guest IDTR. */
5624 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_IDTR))
5625 {
5626 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val); AssertRCReturn(rc, rc);
5627 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
5628 pMixedCtx->idtr.pIdt = u64Val;
5629 pMixedCtx->idtr.cbIdt = u32Val;
5630 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_IDTR;
5631 }
5632
5633 /* Guest TR. */
5634 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_TR))
5635 {
5636 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
5637 AssertRCReturn(rc, rc);
5638
5639 /* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR; don't save that fake one. */
5640 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
5641 {
5642 rc = VMXLOCAL_READ_SEG(TR, tr);
5643 AssertRCReturn(rc, rc);
5644 }
5645 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_TR;
5646 }
5647 return rc;
5648}
5649
5650#undef VMXLOCAL_READ_SEG
5651
5652
5653/**
5654 * Saves the guest debug-register DR7 from the current VMCS into the guest-CPU
5655 * context.
5656 *
5657 * @returns VBox status code.
5658 * @param pVCpu Pointer to the VMCPU.
5659 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5660 * out-of-sync. Make sure to update the required fields
5661 * before using them.
5662 *
5663 * @remarks No-long-jump zone!!!
5664 */
5665static int hmR0VmxSaveGuestDR7(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5666{
5667 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_DEBUG))
5668 {
5669 if (!CPUMIsHyperDebugStateActive(pVCpu))
5670 {
5671 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
5672 uint32_t u32Val;
5673 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val); AssertRCReturn(rc, rc);
5674 pMixedCtx->dr[7] = u32Val;
5675 }
5676
5677 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_DEBUG;
5678 }
5679 return VINF_SUCCESS;
5680}
5681
5682
5683/**
5684 * Saves the guest APIC state from the current VMCS into the guest-CPU context.
5685 *
5686 * @returns VBox status code.
5687 * @param pVCpu Pointer to the VMCPU.
5688 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5689 * out-of-sync. Make sure to update the required fields
5690 * before using them.
5691 *
5692 * @remarks No-long-jump zone!!!
5693 */
5694static int hmR0VmxSaveGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5695{
5696 /* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. */
5697 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_APIC_STATE;
5698 return VINF_SUCCESS;
5699}
5700
5701
5702/**
5703 * Saves the entire guest state from the currently active VMCS into the
5704 * guest-CPU context. This essentially VMREADs all guest-data.
5705 *
5706 * @returns VBox status code.
5707 * @param pVCpu Pointer to the VMCPU.
5708 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5709 * out-of-sync. Make sure to update the required fields
5710 * before using them.
5711 */
5712static int hmR0VmxSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5713{
5714 Assert(pVCpu);
5715 Assert(pMixedCtx);
5716
5717 if (pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL)
5718 return VINF_SUCCESS;
5719
5720 /* Though we can longjmp to ring-3 due to log-flushes here and get recalled again on the ring-3 callback path,
5721 there is no real need to. */
5722 if (VMMRZCallRing3IsEnabled(pVCpu))
5723 VMMR0LogFlushDisable(pVCpu);
5724 else
5725 Assert(VMMR0IsLogFlushDisabled(pVCpu));
5726 Log4Func(("vcpu[%RU32]\n", pVCpu->idCpu));
5727
5728 int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
5729 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestRipRspRflags failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5730
5731 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
5732 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5733
5734 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
5735 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5736
5737 rc = hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
5738 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestTableRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5739
5740 rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
5741 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDR7 failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5742
5743 rc = hmR0VmxSaveGuestSysenterMsrs(pVCpu, pMixedCtx);
5744 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5745
5746 rc = hmR0VmxSaveGuestFSBaseMsr(pVCpu, pMixedCtx);
5747 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestFSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5748
5749 rc = hmR0VmxSaveGuestGSBaseMsr(pVCpu, pMixedCtx);
5750 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestGSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5751
5752 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
5753 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5754
5755 rc = hmR0VmxSaveGuestActivityState(pVCpu, pMixedCtx);
5756 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5757
5758 rc = hmR0VmxSaveGuestApicState(pVCpu, pMixedCtx);
5759 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestApicState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5760
5761 AssertMsg(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL,
5762 ("Missed guest state bits while saving state; residue %RX32\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
5763
5764 if (VMMRZCallRing3IsEnabled(pVCpu))
5765 VMMR0LogFlushEnable(pVCpu);
5766
5767 return rc;
5768}
5769
5770
5771/**
5772 * Check per-VM and per-VCPU force flag actions that require us to go back to
5773 * ring-3 for one reason or another.
5774 *
5775 * @returns VBox status code (informational status codes included).
5776 * @retval VINF_SUCCESS if we don't have any actions that require going back to
5777 * ring-3.
5778 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
5779 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
5780 * interrupts)
5781 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
5782 * all EMTs to be in ring-3.
5783 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
5784 * @retval VINF_EM_NO_MEMORY if PGM is out of memory; we need to return
5785 *         to the EM loop.
5786 *
5787 * @param pVM Pointer to the VM.
5788 * @param pVCpu Pointer to the VMCPU.
5789 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5790 * out-of-sync. Make sure to update the required fields
5791 * before using them.
5792 */
5793static int hmR0VmxCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5794{
5795 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5796
5797 if ( VM_FF_IS_PENDING(pVM, !pVCpu->hm.s.fSingleInstruction
5798 ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
5799 || VMCPU_FF_IS_PENDING(pVCpu, !pVCpu->hm.s.fSingleInstruction
5800 ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
5801 {
5802 /* We need the control registers now, make sure the guest-CPU context is updated. */
5803 int rc3 = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
5804 AssertRCReturn(rc3, rc3);
5805
5806 /* Pending HM CR3 sync. */
5807 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
5808 {
5809 int rc2 = PGMUpdateCR3(pVCpu, pMixedCtx->cr3);
5810 AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
5811 ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
5812 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
5813 }
5814
5815 /* Pending HM PAE PDPEs. */
5816 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
5817 {
5818 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
5819 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
5820 }
5821
5822        /* Pending PGM CR3 sync. */
5823        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
5824 {
5825 int rc2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
5826 if (rc2 != VINF_SUCCESS)
5827 {
5828 AssertRC(rc2);
5829 Log4(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", rc2));
5830 return rc2;
5831 }
5832 }
5833
5834 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
5835 /* -XXX- what was that about single stepping? */
5836 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
5837 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
5838 {
5839 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
5840 int rc2 = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
5841 Log4(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2));
5842 return rc2;
5843 }
5844
5845 /* Pending VM request packets, such as hardware interrupts. */
5846 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
5847 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
5848 {
5849 Log4(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
5850 return VINF_EM_PENDING_REQUEST;
5851 }
5852
5853 /* Pending PGM pool flushes. */
5854 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
5855 {
5856 Log4(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
5857 return VINF_PGM_POOL_FLUSH_PENDING;
5858 }
5859
5860 /* Pending DMA requests. */
5861 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
5862 {
5863 Log4(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
5864 return VINF_EM_RAW_TO_R3;
5865 }
5866 }
5867
5868 /* Paranoia. */
5869 return VINF_SUCCESS;
5870}
5871
5872
5873/**
5874 * Converts any TRPM trap into a pending HM event. This is typically used when
5875 * entering from ring-3 (not longjmp returns).
5876 *
5877 * @param pVCpu Pointer to the VMCPU.
5878 */
5879static void hmR0VmxTrpmTrapToPendingEvent(PVMCPU pVCpu)
5880{
5881 Assert(TRPMHasTrap(pVCpu));
5882 Assert(!pVCpu->hm.s.Event.fPending);
5883
5884 uint8_t uVector;
5885 TRPMEVENT enmTrpmEvent;
5886 RTGCUINT uErrCode;
5887 RTGCUINTPTR GCPtrFaultAddress;
5888 uint8_t cbInstr;
5889
5890 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
5891 AssertRC(rc);
5892
5893 /* Refer Intel spec. 24.8.3 "VM-entry Controls for Event Injection" for the format of u32IntrInfo. */
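    /* Bits 7:0 = vector, bits 10:8 = interruption type, bit 11 = deliver-error-code, bit 31 = valid. */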
5894 uint32_t u32IntrInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
5895 if (enmTrpmEvent == TRPM_TRAP)
5896 {
5897 switch (uVector)
5898 {
5899 case X86_XCPT_BP:
5900 case X86_XCPT_OF:
5901 {
5902 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5903 break;
5904 }
5905
5906 case X86_XCPT_PF:
5907 case X86_XCPT_DF:
5908 case X86_XCPT_TS:
5909 case X86_XCPT_NP:
5910 case X86_XCPT_SS:
5911 case X86_XCPT_GP:
5912 case X86_XCPT_AC:
5913 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
5914 /* no break! */
5915 default:
5916 {
5917 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5918 break;
5919 }
5920 }
5921 }
5922 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
5923 {
5924 if (uVector == X86_XCPT_NMI)
5925 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5926 else
5927 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5928 }
5929 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
5930 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5931 else
5932 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
5933
5934 rc = TRPMResetTrap(pVCpu);
5935 AssertRC(rc);
5936 Log4(("TRPM->HM event: u32IntrInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
5937 u32IntrInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
5938
5939 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, uErrCode, GCPtrFaultAddress);
5940 STAM_COUNTER_DEC(&pVCpu->hm.s.StatInjectPendingReflect);
5941}
5942
5943
5944/**
5945 * Converts any pending HM event into a TRPM trap. Typically used when leaving
5946 * VT-x to execute any instruction.
5947 *
5948 * @param pVCpu         Pointer to the VMCPU.
5949 */
5950static void hmR0VmxPendingEventToTrpmTrap(PVMCPU pVCpu)
5951{
5952 Assert(pVCpu->hm.s.Event.fPending);
5953
5954 uint32_t uVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo);
5955 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVCpu->hm.s.Event.u64IntrInfo);
5956 bool fErrorCodeValid = !!VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntrInfo);
5957 uint32_t uErrorCode = pVCpu->hm.s.Event.u32ErrCode;
5958
5959 /* If a trap was already pending, we did something wrong! */
5960 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
5961
5962 TRPMEVENT enmTrapType;
5963 switch (uVectorType)
5964 {
5965 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
5966 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
5967 enmTrapType = TRPM_HARDWARE_INT;
5968 break;
5969 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
5970 enmTrapType = TRPM_SOFTWARE_INT;
5971 break;
5972 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
5973 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP and #OF */
5974 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
5975 enmTrapType = TRPM_TRAP;
5976 break;
5977 default:
5978 AssertMsgFailed(("Invalid trap type %#x\n", uVectorType));
5979 enmTrapType = TRPM_32BIT_HACK;
5980 break;
5981 }
5982
5983 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
5984
5985 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
5986 AssertRC(rc);
5987
5988 if (fErrorCodeValid)
5989 TRPMSetErrorCode(pVCpu, uErrorCode);
5990
5991 if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5992 && uVector == X86_XCPT_PF)
5993 {
5994 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
5995 }
5996 else if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5997 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5998 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
5999 {
6000 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6001 || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
6002 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
6003 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
6004 }
6005 pVCpu->hm.s.Event.fPending = false;
6006}
6007
6008
6009/**
6010 * Does the necessary state syncing before returning to ring-3 for any reason
6011 * (longjmp, preemption, voluntary exits to ring-3) from VT-x.
6012 *
6013 * @param pVM Pointer to the VM.
6014 * @param pVCpu Pointer to the VMCPU.
6015 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6016 * out-of-sync. Make sure to update the required fields
6017 * before using them.
6018 *
6019 * @remarks No-long-jmp zone!!!
6020 */
6021static void hmR0VmxLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6022{
6023 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6024 Assert(VMMR0IsLogFlushDisabled(pVCpu));
6025
6026 /* Save the guest state if necessary. */
6027 if (pVCpu->hm.s.vmx.fUpdatedGuestState != HMVMX_UPDATED_GUEST_ALL)
6028 {
6029 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
6030 AssertRC(rc);
6031 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL);
6032 }
6033
6034    /* Restore host FPU state if necessary and resync on next R0 reentry. */
6035 if (CPUMIsGuestFPUStateActive(pVCpu))
6036 {
6037 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
6038 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
6039 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
6040 }
6041
6042 /* Restore host debug registers if necessary and resync on next R0 reentry. */
6043#ifdef VBOX_STRICT
6044 if (CPUMIsHyperDebugStateActive(pVCpu))
6045 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
6046#endif
6047 if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */))
6048 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
6049 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
6050 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
6051
6052 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
6053 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
6054 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
6055 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
6056 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitIO);
6057 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitMovCRx);
6058 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitXcptNmi);
6059 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
6060
6061 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
6062}
6063
6064
6065/**
6066 * Does the necessary state syncing before doing a longjmp to ring-3.
6067 *
6068 * @param pVM Pointer to the VM.
6069 * @param pVCpu Pointer to the VMCPU.
6070 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6071 * out-of-sync. Make sure to update the required fields
6072 * before using them.
6073 *
6074 * @remarks No-long-jmp zone!!!
6075 */
6076DECLINLINE(void) hmR0VmxLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6077{
6078 hmR0VmxLeave(pVM, pVCpu, pMixedCtx);
6079}
6080
6081
6082/**
6083 * An action requires us to go back to ring-3. This function does the necessary
6084 * steps before we can safely return to ring-3. This is not the same as longjmps
6085 * to ring-3; this is voluntary and prepares the guest so it may continue
6086 * executing outside HM (recompiler/IEM).
6087 *
6088 * @param pVM Pointer to the VM.
6089 * @param pVCpu Pointer to the VMCPU.
6090 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6091 * out-of-sync. Make sure to update the required fields
6092 * before using them.
6093 * @param rcExit The reason for exiting to ring-3. Can be
6094 * VINF_VMM_UNKNOWN_RING3_CALL.
6095 */
6096static void hmR0VmxExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)
6097{
6098 Assert(pVM);
6099 Assert(pVCpu);
6100 Assert(pMixedCtx);
6101 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6102
6103 if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_GUEST_STATE))
6104 {
6105 /* We want to see what the guest-state was before VM-entry, don't resync here, as we won't continue guest execution. */
6106 return;
6107 }
6108 else if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
6109 {
6110 VMXGetActivateVMCS(&pVCpu->hm.s.vmx.LastError.u64VMCSPhys);
6111 pVCpu->hm.s.vmx.LastError.u32VMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs;
6112 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
6113 pVCpu->hm.s.vmx.LastError.idCurrentCpu = RTMpCpuId();
6114 return;
6115 }
6116
6117    /* Please, no longjmps here (any logging could trigger a log-flush longjmp back to ring-3). NO LOGGING BEFORE THIS POINT! */
6118 VMMRZCallRing3Disable(pVCpu);
6119 Log4(("hmR0VmxExitToRing3: pVCpu=%p idCpu=%RU32 rcExit=%d\n", pVCpu, pVCpu->idCpu, rcExit));
6120
6121 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
6122 if (pVCpu->hm.s.Event.fPending)
6123 {
6124 hmR0VmxPendingEventToTrpmTrap(pVCpu);
6125 Assert(!pVCpu->hm.s.Event.fPending);
6126 }
6127
6128 /* Save guest state and restore host state bits. */
6129 hmR0VmxLeave(pVM, pVCpu, pMixedCtx);
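    /* hmR0VmxLeave() increments StatSwitchLongJmpToR3; undo that here as this path is a voluntary exit
       (counted by StatSwitchExitToR3 below), not a longjmp. */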
6130 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
6131
6132 /* Sync recompiler state. */
6133 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
6134 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
6135 | CPUM_CHANGED_LDTR
6136 | CPUM_CHANGED_GDTR
6137 | CPUM_CHANGED_IDTR
6138 | CPUM_CHANGED_TR
6139 | CPUM_CHANGED_HIDDEN_SEL_REGS);
6140 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0);
6141 if ( pVM->hm.s.fNestedPaging
6142 && CPUMIsGuestPagingEnabledEx(pMixedCtx))
6143 {
6144 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
6145 }
6146
6147 /* On our way back from ring-3 the following needs to be done. */
6148 /** @todo This can change with preemption hooks. */
6149 if (rcExit == VINF_EM_RAW_INTERRUPT)
6150 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT;
6151 else
6152 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;
6153
6154 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
6155 VMMRZCallRing3Enable(pVCpu);
6156}
6157
6158
6159/**
6160 * VMMRZCallRing3() callback wrapper which saves the guest state before we
6161 * longjump to ring-3 and possibly get preempted.
6162 *
6163 * @param pVCpu Pointer to the VMCPU.
6164 * @param enmOperation The operation causing the ring-3 longjump.
6165 * @param pvUser The user argument (pointer to the possibly
6166 * out-of-date guest-CPU context).
6167 *
6168 * @remarks Must never be called with @a enmOperation ==
6169 * VMMCALLRING3_VM_R0_ASSERTION.
6170 */
6171DECLCALLBACK(void) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
6172{
6173    /* VMMRZCallRing3() already makes sure we never get called as a result of a longjmp due to an assertion. */
6174 Assert(pVCpu);
6175 Assert(pvUser);
6176 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6177 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6178
6179 VMMRZCallRing3Disable(pVCpu);
6180 Assert(VMMR0IsLogFlushDisabled(pVCpu));
6181 Log4(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3 pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
6182 hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser);
6183 VMMRZCallRing3Enable(pVCpu);
6184}
6185
6186
6187/**
6188 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
6189 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
6190 *
6191 * @param pVCpu Pointer to the VMCPU.
6192 */
6193DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
6194{
6195 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
6196 {
6197 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
6198 {
6199 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
6200 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
6201 AssertRC(rc);
6202 }
6203 } /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. */
6204}
6205
6206
6207/**
6208 * Injects any pending events into the guest if the guest is in a state to
6209 * receive them.
6210 *
6211 * @returns VBox status code (informational status codes included).
6212 * @param pVCpu Pointer to the VMCPU.
6213 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6214 * out-of-sync. Make sure to update the required fields
6215 * before using them.
6216 */
6217static int hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6218{
6219 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
6220 uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
6221 bool fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
6222 bool fBlockSti = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
6223
6224 Assert(!fBlockSti || (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS));
6225    Assert( !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI) /* We don't support block-by-NMI and SMI yet. */
6226 && !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));
6227 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
6228 Assert(!TRPMHasTrap(pVCpu));
6229
6230 int rc = VINF_SUCCESS;
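    /* Injection priority: a previously pending HM event first, then NMIs, then external (PIC/APIC) interrupts. */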
6231 if (pVCpu->hm.s.Event.fPending) /* First, inject any pending HM events. */
6232 {
6233 uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo);
6234 bool fInject = true;
6235 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
6236 {
6237 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
6238 AssertRCReturn(rc, rc);
6239 const bool fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
6240 if ( fBlockInt
6241 || fBlockSti
6242 || fBlockMovSS)
6243 {
6244 fInject = false;
6245 }
6246 }
6247 else if ( uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
6248 && ( fBlockMovSS
6249 || fBlockSti))
6250 {
6251 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
6252 fInject = false;
6253 }
6254
6255 if (fInject)
6256 {
6257 Log4(("Injecting pending event vcpu[%RU32]\n", pVCpu->idCpu));
6258 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.cbInstr,
6259 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, &uIntrState);
6260 AssertRCReturn(rc, rc);
6261 pVCpu->hm.s.Event.fPending = false;
6262
6263#ifdef VBOX_WITH_STATISTICS
6264 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
6265 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
6266 else
6267 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
6268#endif
6269 }
6270 else
6271 hmR0VmxSetIntWindowExitVmcs(pVCpu);
6272 } /** @todo SMI. SMIs take priority over NMIs. */
6273    else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
6274 {
6275 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
6276 if ( !fBlockMovSS
6277 && !fBlockSti)
6278 {
6279 Log4(("Injecting NMI\n"));
6280 uint32_t u32IntrInfo = X86_XCPT_NMI | VMX_EXIT_INTERRUPTION_INFO_VALID;
6281 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6282 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */,
6283 0 /* GCPtrFaultAddress */, &uIntrState);
6284 AssertRCReturn(rc, rc);
6285 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
6286
6287 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
6288 }
6289 else
6290 hmR0VmxSetIntWindowExitVmcs(pVCpu);
6291 }
6292 else if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
6293 {
6294 /* Check if there are guest external interrupts (PIC/APIC) pending and inject them if the guest can receive them. */
6295 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
6296 AssertRCReturn(rc, rc);
6297 const bool fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
6298 if ( !fBlockInt
6299 && !fBlockSti
6300 && !fBlockMovSS)
6301 {
6302 uint8_t u8Interrupt;
6303 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
6304 if (RT_SUCCESS(rc))
6305 {
6306 Log4(("Injecting interrupt u8Interrupt=%#x\n", u8Interrupt));
6307 uint32_t u32IntrInfo = u8Interrupt | VMX_EXIT_INTERRUPTION_INFO_VALID;
6308 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6309 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */,
6310 0 /* GCPtrFaultAddress */, &uIntrState);
6311
6312 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
6313 }
6314 else
6315 {
6316 /** @todo Does this actually happen? If not turn it into an assertion. */
6317 Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
6318 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
6319 rc = VINF_SUCCESS;
6320 }
6321 }
6322 else
6323 hmR0VmxSetIntWindowExitVmcs(pVCpu);
6324 }
6325
6326 /*
6327     * Deliver a pending debug exception if the guest is single-stepping. The interruptibility-state could have been changed by
6328     * hmR0VmxInjectEventVmcs() (e.g. real-on-v86 mode injecting software interrupts), so re-evaluate it and set the BS bit.
6329 */
6330 fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
6331 fBlockSti = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
6332 int rc2 = VINF_SUCCESS;
6333 if ( fBlockSti
6334 || fBlockMovSS)
6335 {
6336 if (!DBGFIsStepping(pVCpu))
6337 {
6338 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS);
6339 if (pMixedCtx->eflags.Bits.u1TF) /* We don't have any IA32_DEBUGCTL MSR for guests. Treat as all bits 0. */
6340 {
6341 /*
6342 * The pending-debug exceptions field is cleared on all VM-exits except VMX_EXIT_TPR_BELOW_THRESHOLD,
6343 * VMX_EXIT_MTF, VMX_EXIT_APIC_WRITE and VMX_EXIT_VIRTUALIZED_EOI.
6344 * See Intel spec. 27.3.4 "Saving Non-Register State".
6345 */
6346 rc2 = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS);
6347                 AssertRCReturn(rc2, rc2);
6348 }
6349 }
6350 else
6351 {
6352 /* We are single-stepping in the hypervisor debugger, clear interrupt inhibition as setting the BS bit would mean
6353 delivering a #DB to the guest upon VM-entry when it shouldn't be. */
6354 uIntrState = 0;
6355 }
6356 }
6357
6358 /*
6359     * There's no need to clear the VM-entry interruption-information field here if we're not injecting anything.
6360 * VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
6361 */
6362 rc2 = hmR0VmxLoadGuestIntrState(pVCpu, uIntrState);
6363 AssertRC(rc2);
6364
6365 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
6366 return rc;
6367}
6368
6369
6370/**
6371 * Sets an invalid-opcode (#UD) exception as pending-for-injection into the VM.
6372 *
6373 * @param pVCpu Pointer to the VMCPU.
6374 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6375 * out-of-sync. Make sure to update the required fields
6376 * before using them.
6377 */
6378DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6379{
6380 uint32_t u32IntrInfo = X86_XCPT_UD | VMX_EXIT_INTERRUPTION_INFO_VALID;
6381 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6382}
6383
6384
6385/**
6386 * Injects a double-fault (#DF) exception into the VM.
6387 *
6388 * @returns VBox status code (informational status code included).
6389 * @param pVCpu Pointer to the VMCPU.
6390 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6391 * out-of-sync. Make sure to update the required fields
6392 *                      before using them.
 * @param   puIntrState     Pointer to the current guest interruptibility-state.
6393 */
6394DECLINLINE(int) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t *puIntrState)
6395{
6396 uint32_t u32IntrInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
6397 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6398 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6399 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */,
6400 puIntrState);
6401}
6402
6403
6404/**
6405 * Sets a debug (#DB) exception as pending-for-injection into the VM.
6406 *
6407 * @param pVCpu Pointer to the VMCPU.
6408 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6409 * out-of-sync. Make sure to update the required fields
6410 * before using them.
6411 */
6412DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6413{
6414 uint32_t u32IntrInfo = X86_XCPT_DB | VMX_EXIT_INTERRUPTION_INFO_VALID;
6415 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6416 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6417}
6418
6419
6420/**
6421 * Sets an overflow (#OF) exception as pending-for-injection into the VM.
6422 *
6423 * @param pVCpu Pointer to the VMCPU.
6424 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6425 * out-of-sync. Make sure to update the required fields
6426 * before using them.
6427 * @param cbInstr The value of RIP that is to be pushed on the guest
6428 * stack.
6429 */
6430DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
6431{
6432 uint32_t u32IntrInfo = X86_XCPT_OF | VMX_EXIT_INTERRUPTION_INFO_VALID;
6433 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6434 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6435}
6436
6437
6438/**
6439 * Injects a general-protection (#GP) fault into the VM.
6440 *
6441 * @returns VBox status code (informational status code included).
6442 * @param pVCpu Pointer to the VMCPU.
6443 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6444 * out-of-sync. Make sure to update the required fields
6445 * before using them.
6446 * @param   fErrorCodeValid Whether the error code is valid for this #GP.
 * @param   u32ErrorCode    The error code associated with the #GP.
 * @param   puIntrState     Pointer to the current guest interruptibility-state.
6447 */
6448DECLINLINE(int) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
6449 uint32_t *puIntrState)
6450{
6451 uint32_t u32IntrInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
6452 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6453 if (fErrorCodeValid)
6454 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6455 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */,
6456 puIntrState);
6457}
6458
6459
6460/**
6461 * Sets a software interrupt (INTn) as pending-for-injection into the VM.
6462 *
6463 * @param pVCpu Pointer to the VMCPU.
6464 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6465 * out-of-sync. Make sure to update the required fields
6466 * before using them.
6467 * @param uVector The software interrupt vector number.
6468 * @param cbInstr The value of RIP that is to be pushed on the guest
6469 * stack.
6470 */
6471DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr)
6472{
6473 uint32_t u32IntrInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
6474 if ( uVector == X86_XCPT_BP
6475 || uVector == X86_XCPT_OF)
6476 {
6477 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6478 }
6479 else
6480 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6481 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6482}
6483
6484
6485/**
6486 * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
6487 * stack.
6488 *
6489 * @returns VBox status code (information status code included).
6490 * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
6491 * @param pVM Pointer to the VM.
6492 * @param pMixedCtx Pointer to the guest-CPU context.
6493 * @param uValue The value to push to the guest stack.
6494 */
6495DECLINLINE(int) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
6496{
6497 /*
6498 * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
6499 * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
6500 * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
6501 */
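    /* Note: with SP=1 a 2-byte push would straddle/wrap the stack segment; rather than emulating that corner
       case we treat it as fatal and report a triple fault (VINF_EM_RESET). */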
6502 if (pMixedCtx->sp == 1)
6503 return VINF_EM_RESET;
6504 pMixedCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */
6505 int rc = PGMPhysSimpleWriteGCPhys(pVM, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));
6506 AssertRCReturn(rc, rc);
6507 return rc;
6508}
6509
6510
6511/**
6512 * Injects an event into the guest upon VM-entry by updating the relevant fields
6513 * in the VM-entry area in the VMCS.
6514 *
6515 * @returns VBox status code (informational error codes included).
6516 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
6517 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
6518 *
6519 * @param pVCpu Pointer to the VMCPU.
6520 * @param pMixedCtx Pointer to the guest-CPU context. The data may
6521 * be out-of-sync. Make sure to update the required
6522 * fields before using them.
6523 * @param u64IntrInfo The VM-entry interruption-information field.
6524 * @param cbInstr The VM-entry instruction length in bytes (for
6525 * software interrupts, exceptions and privileged
6526 * software exceptions).
6527 * @param u32ErrCode The VM-entry exception error code.
6528 * @param GCPtrFaultAddress The page-fault address for #PF exceptions.
6529 * @param puIntrState Pointer to the current guest interruptibility-state.
6530 * This interruptibility-state will be updated if
6531 *                              necessary. This cannot be NULL.
6532 *
6533 * @remarks No-long-jump zone!!!
6534 * @remarks Requires CR0!
6535 */
6536static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
6537 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntrState)
6538{
6539 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
6540 AssertMsg(u64IntrInfo >> 32 == 0, ("%#RX64\n", u64IntrInfo));
6541 Assert(puIntrState);
6542 uint32_t u32IntrInfo = (uint32_t)u64IntrInfo;
6543
6544 const uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntrInfo);
6545 const uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo);
6546
6547#ifdef VBOX_STRICT
6548 /* Validate the error-code-valid bit for hardware exceptions. */
6549 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT)
6550 {
6551 switch (uVector)
6552 {
6553 case X86_XCPT_PF:
6554 case X86_XCPT_DF:
6555 case X86_XCPT_TS:
6556 case X86_XCPT_NP:
6557 case X86_XCPT_SS:
6558 case X86_XCPT_GP:
6559 case X86_XCPT_AC:
6560 AssertMsg(VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntrInfo),
6561 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
6562 /* fallthru */
6563 default:
6564 break;
6565 }
6566 }
6567#endif
6568
6569 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
6570 Assert( uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
6571 || !(*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));
6572
6573 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
6574
6575 /* We require CR0 to check if the guest is in real-mode. */
6576 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6577 AssertRCReturn(rc, rc);
6578
6579 /*
6580 * Hardware interrupts & exceptions cannot be delivered through the software interrupt redirection bitmap to the real
6581 * mode task in virtual-8086 mode. We must jump to the interrupt handler in the (real-mode) guest.
6582 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode" for interrupt & exception classes.
6583 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
6584 */
6585 if (CPUMIsGuestInRealModeEx(pMixedCtx))
6586 {
6587 PVM pVM = pVCpu->CTX_SUFF(pVM);
6588 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
6589 {
6590 Assert(PDMVmmDevHeapIsEnabled(pVM));
6591 Assert(pVM->hm.s.vmx.pRealModeTSS);
6592
6593 /* We require RIP, RSP, RFLAGS, CS, IDTR. Save the required ones from the VMCS. */
6594 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
6595 rc |= hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
6596 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
6597 AssertRCReturn(rc, rc);
6598 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP);
6599
6600 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
6601 const size_t cbIdtEntry = 4;
6602 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pMixedCtx->idtr.cbIdt)
6603 {
6604 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
6605 if (uVector == X86_XCPT_DF)
6606 return VINF_EM_RESET;
6607 else if (uVector == X86_XCPT_GP)
6608 {
6609 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
6610 return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, puIntrState);
6611 }
6612
6613 /* If we're injecting an interrupt/exception with no valid IDT entry, inject a general-protection fault. */
6614 /* No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling" */
6615 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */, puIntrState);
6616 }
6617
6618 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
6619 uint16_t uGuestIp = pMixedCtx->ip;
6620 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT)
6621 {
6622 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
6623 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
6624 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
6625 }
6626 else if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT)
6627 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
6628
6629 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
6630 uint16_t offIdtEntry = 0;
6631 RTSEL selIdtEntry = 0;
6632 RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pMixedCtx->idtr.pIdt + uVector * cbIdtEntry;
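            /* A real-mode IVT entry is 4 bytes: the 16-bit handler offset at byte 0 and the 16-bit code segment selector at byte 2. */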
6633 rc = PGMPhysSimpleReadGCPhys(pVM, &offIdtEntry, GCPhysIdtEntry, sizeof(offIdtEntry));
6634 rc |= PGMPhysSimpleReadGCPhys(pVM, &selIdtEntry, GCPhysIdtEntry + 2, sizeof(selIdtEntry));
6635 AssertRCReturn(rc, rc);
6636
6637 /* Construct the stack frame for the interrupt/exception handler. */
6638 rc = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
6639 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
6640 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, uGuestIp);
6641 AssertRCReturn(rc, rc);
6642
6643 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
6644 if (rc == VINF_SUCCESS)
6645 {
6646 pMixedCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
6647 pMixedCtx->rip = offIdtEntry;
6648 pMixedCtx->cs.Sel = selIdtEntry;
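                /* Real-mode segment base = selector * 16; cbIdtEntry (4) conveniently doubles as the shift count here. */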
6649 pMixedCtx->cs.u64Base = selIdtEntry << cbIdtEntry;
6650 if ( uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
6651 && uVector == X86_XCPT_PF)
6652 {
6653 pMixedCtx->cr2 = GCPtrFaultAddress;
6654 }
6655 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS
6656 | HM_CHANGED_GUEST_RIP
6657 | HM_CHANGED_GUEST_RFLAGS
6658 | HM_CHANGED_GUEST_RSP;
6659
6660 /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */
6661 if (*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
6662 {
6663 Assert( uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
6664 && uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
6665 Log4(("Clearing inhibition due to STI.\n"));
6666 *puIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
6667 }
6668 Log4(("Injecting real-mode: u32IntrInfo=%#x u32ErrCode=%#x instrlen=%#x\n", u32IntrInfo, u32ErrCode, cbInstr));
6669 }
6670 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
6671 return rc;
6672 }
6673 else
6674 {
6675 /*
6676             * When unrestricted execution is enabled and the guest is in real mode, we must not set the deliver-error-code bit.
6677 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
6678 */
6679 u32IntrInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6680 }
6681 }
6682
6683 /* Validate. */
6684 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(u32IntrInfo)); /* Bit 31 (Valid bit) must be set by caller. */
6685 Assert(!VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK(u32IntrInfo)); /* Bit 12 MBZ. */
6686 Assert(!(u32IntrInfo & 0x7ffff000)); /* Bits 30:12 MBZ. */
6687
6688 /* Inject. */
6689 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntrInfo);
6690 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntrInfo))
6691 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
6692 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
6693
6694 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
6695 && uVector == X86_XCPT_PF)
6696 {
6697 pMixedCtx->cr2 = GCPtrFaultAddress;
6698 }
6699
6700 Log4(("Injecting vcpu[%RU32] u32IntrInfo=%#x u32ErrCode=%#x cbInstr=%#x pMixedCtx->uCR2=%#RX64\n", pVCpu->idCpu,
6701 u32IntrInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));
6702
6703 AssertRCReturn(rc, rc);
6704 return rc;
6705}
6706
6707
6708/**
6709 * Enters the VT-x session.
6710 *
6711 * @returns VBox status code.
6712 * @param pVM Pointer to the VM.
6713 * @param pVCpu Pointer to the VMCPU.
6714 * @param pCpu Pointer to the CPU info struct.
6715 */
6716VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)
6717{
6718 AssertPtr(pVM);
6719 AssertPtr(pVCpu);
6720 Assert(pVM->hm.s.vmx.fSupported);
6721 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6722 NOREF(pCpu);
6723
6724 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
6725
6726#ifdef VBOX_STRICT
6727 /* Make sure we're in VMX root mode. */
6728 RTCCUINTREG u32HostCR4 = ASMGetCR4();
6729 if (!(u32HostCR4 & X86_CR4_VMXE))
6730 {
6731 LogRel(("VMXR0Enter: X86_CR4_VMXE bit in CR4 is not set!\n"));
6732 return VERR_VMX_X86_CR4_VMXE_CLEARED;
6733 }
6734#endif
6735
6736 /* Load the active VMCS as the current one. */
6737 int rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
6738 if (RT_FAILURE(rc))
6739 return rc;
6740
6741    /** @todo this will change with preemption hooks where we can VMRESUME as long
6742     *        as we're not preempted. */
6743 pVCpu->hm.s.fResumeVM = false;
6744 return VINF_SUCCESS;
6745}
6746
6747
6748/**
6749 * Leaves the VT-x session.
6750 *
6751 * @returns VBox status code.
6752 * @param pVM Pointer to the VM.
6753 * @param pVCpu Pointer to the VMCPU.
6754 * @param pCtx Pointer to the guest-CPU context.
6755 */
6756VMMR0DECL(int) VMXR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
6757{
6758 AssertPtr(pVCpu);
6759 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6760 NOREF(pVM);
6761 NOREF(pCtx);
6762
6763 /** @todo this will change with preemption hooks where we only VMCLEAR when
6764 * we are actually going to be preempted, not all the time like we
6765 * currently do. */
6766
6767 /* Restore host-state bits that VT-x only restores partially. */
6768 if (pVCpu->hm.s.vmx.fRestoreHostFlags)
6769 {
6770 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
6771 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
6772 }
6773
6774 /*
6775 * Sync the current VMCS (writes back internal data back into the VMCS region in memory)
6776 * and mark the VMCS launch-state as "clear".
6777 */
6778 int rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
6779 return rc;
6780}
6781
6782
6783/**
6784 * Saves the host state in the VMCS host-state.
6785 * Sets up the VM-exit MSR-load area.
6786 *
6787 * The CPU state will be loaded from these fields on every successful VM-exit.
6788 *
6789 * @returns VBox status code.
6790 * @param pVM Pointer to the VM.
6791 * @param pVCpu Pointer to the VMCPU.
6792 *
6793 * @remarks No-long-jump zone!!!
6794 */
6795VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
6796{
6797 AssertPtr(pVM);
6798 AssertPtr(pVCpu);
6799 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6800
6801 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
6802
6803 /* Nothing to do if the host-state-changed flag isn't set. This will later be optimized when preemption hooks are in place. */
6804 if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT))
6805 return VINF_SUCCESS;
6806
6807 int rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
6808 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6809
6810 rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
6811 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6812
6813 rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
6814 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6815
6816 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;
6817 return rc;
6818}
6819
6820
6821/**
6822 * Loads the guest state into the VMCS guest-state area. The CPU state will be
6823 * loaded from these fields on every successful VM-entry.
6824 *
6825 * Sets up the VM-entry MSR-load and VM-exit MSR-store areas.
6826 * Sets up the VM-entry controls.
6827 * Sets up the appropriate VMX non-root function to execute guest code based on
6828 * the guest CPU mode.
6829 *
6830 * @returns VBox status code.
6831 * @param pVM Pointer to the VM.
6832 * @param pVCpu Pointer to the VMCPU.
6833 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6834 * out-of-sync. Make sure to update the required fields
6835 * before using them.
6836 *
6837 * @remarks No-long-jump zone!!!
6838 */
6839static int hmR0VmxLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6840{
6841 AssertPtr(pVM);
6842 AssertPtr(pVCpu);
6843 AssertPtr(pMixedCtx);
6844 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6845
6846#ifdef LOG_ENABLED
6847 /** @todo r=ramshankar: I'm not able to use VMMRZCallRing3Disable() here,
6848 * probably not initialized yet? Anyway this will do for now. */
6849 bool fCallerDisabledLogFlush = VMMR0IsLogFlushDisabled(pVCpu);
6850 VMMR0LogFlushDisable(pVCpu);
6851#endif
6852
6853 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
6854
6855 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
6856
6857 /* Determine real-on-v86 mode. */
6858 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
6859 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
6860 && CPUMIsGuestInRealModeEx(pMixedCtx))
6861 {
6862 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
6863 }
6864
6865 /*
6866 * Load the guest-state into the VMCS.
6867 * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
6868 * Ideally, assert that the cross-dependent bits are up to date at the point of using it.
6869 */
6870 int rc = hmR0VmxLoadGuestEntryCtls(pVCpu, pMixedCtx);
6871 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestEntryCtls! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6872
6873 rc = hmR0VmxLoadGuestExitCtls(pVCpu, pMixedCtx);
6874 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupExitCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6875
6876 rc = hmR0VmxLoadGuestActivityState(pVCpu, pMixedCtx);
6877 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6878
6879 rc = hmR0VmxLoadGuestControlRegs(pVCpu, pMixedCtx);
6880 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestControlRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6881
6882 /* Must be done after CR0 is loaded (strict builds require CR0 for segment register validation checks). */
6883 rc = hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx);
6884 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6885
6886 rc = hmR0VmxLoadGuestDebugState(pVCpu, pMixedCtx);
6887 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestDebugState: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6888
6889 rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
6890 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6891
6892 rc = hmR0VmxLoadGuestApicState(pVCpu, pMixedCtx);
6893 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6894
6895 /* Must be done after hmR0VmxLoadGuestDebugState() as it may have updated eflags.TF for debugging purposes. */
6896 rc = hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx);
6897 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestRipRspRflags! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6898
6899 rc = hmR0VmxSetupVMRunHandler(pVCpu, pMixedCtx);
6900 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6901
6902 /* Clear any unused and reserved bits. */
6903 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR2;
6904
6905 AssertMsg(!pVCpu->hm.s.fContextUseFlags,
6906 ("Missed updating flags while loading guest state. pVM=%p pVCpu=%p idCpu=%RU32 fContextUseFlags=%#RX32\n",
6907 pVM, pVCpu, pVCpu->idCpu, pVCpu->hm.s.fContextUseFlags));
6908
6909#ifdef LOG_ENABLED
6910 /* Only reenable log-flushing if the caller has it enabled. */
6911 if (!fCallerDisabledLogFlush)
6912 VMMR0LogFlushEnable(pVCpu);
6913#endif
6914
6915 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
6916 return rc;
6917}
6918
6919
6920/**
6921 * Loads the guest state into the VMCS guest-state area.
6922 *
6923 * @returns VBox status code.
6924 * @param pVM Pointer to the VM.
6925 * @param pVCpu Pointer to the VMCPU.
6926 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6927 * out-of-sync. Make sure to update the required fields
6928 * before using them.
6929 *
6930 * @remarks No-long-jump zone!!!
6931 */
6932VMMR0DECL(int) VMXR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6933{
6934 /*
6935     * Avoid reloading the guest state on longjmp re-entries and do it lazily just before executing the guest.
6936 * This only helps when we get rescheduled more than once to a different host CPU on a longjmp trip before
6937 * finally executing guest code.
6938 */
6939 return VINF_SUCCESS;
6940}
6941
6942
6943/**
6944 * Does the preparations before executing guest code in VT-x.
6945 *
6946 * This may cause longjmps to ring-3 and may even result in rescheduling to the
6947  * recompiler. We must be cautious about what we do here regarding committing
6948  * guest-state information into the VMCS, assuming we assuredly execute the
6949 * guest in VT-x. If we fall back to the recompiler after updating the VMCS and
6950 * clearing the common-state (TRPM/forceflags), we must undo those changes so
6951 * that the recompiler can (and should) use them when it resumes guest
6952 * execution. Otherwise such operations must be done when we can no longer
6953 * exit to ring-3.
6954 *
6955 * @returns VBox status code (informational status codes included).
6956 * @retval VINF_SUCCESS if we can proceed with running the guest.
6957 * @retval VINF_EM_RESET if a triple-fault occurs while injecting a double-fault
6958 * into the guest.
6959 * @retval VINF_* scheduling changes, we have to go back to ring-3.
6960 *
6961 * @param pVM Pointer to the VM.
6962 * @param pVCpu Pointer to the VMCPU.
6963 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6964 * out-of-sync. Make sure to update the required fields
6965 * before using them.
6966 * @param pVmxTransient Pointer to the VMX transient structure.
6967 *
6968 * @remarks Called with preemption disabled.
6969 */
6970static int hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6971{
6972 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6973
6974#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
6975 PGMRZDynMapFlushAutoSet(pVCpu);
6976#endif
6977
6978 /* Check force flag actions that might require us to go back to ring-3. */
6979 int rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx);
6980 if (rc != VINF_SUCCESS)
6981 return rc;
6982
6983 /* Setup the Virtualized APIC accesses. pMixedCtx->msrApicBase is always up-to-date. It's not part of the VMCS. */
6984 if ( pVCpu->hm.s.vmx.u64MsrApicBase != pMixedCtx->msrApicBase
6985 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
6986 {
6987 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
6988 RTGCPHYS GCPhysApicBase;
6989 GCPhysApicBase = pMixedCtx->msrApicBase;
6990 GCPhysApicBase &= PAGE_BASE_GC_MASK;
6991
6992 /* Unalias any existing mapping. */
6993 rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
6994 AssertRCReturn(rc, rc);
6995
6996 /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
6997 Log4(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGv\n", GCPhysApicBase));
6998 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
6999 AssertRCReturn(rc, rc);
7000
7001 pVCpu->hm.s.vmx.u64MsrApicBase = pMixedCtx->msrApicBase;
7002 }
7003
7004#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
7005 /* We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.) */
7006 pVmxTransient->uEFlags = ASMIntDisableFlags();
7007 if (RTThreadPreemptIsPending(NIL_RTTHREAD))
7008 {
7009 ASMSetFlags(pVmxTransient->uEFlags);
7010 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
7011 /* Don't use VINF_EM_RAW_INTERRUPT_HYPER as we can't assume the host does kernel preemption. Maybe some day? */
7012 return VINF_EM_RAW_INTERRUPT;
7013 }
7014 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
7015 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
7016#endif
7017
7018 /*
7019 * Evaluates and injects any pending events, toggling force-flags and updating the guest-interruptibility
7020 * state (interrupt shadow) in the VMCS. This -can- potentially be reworked to be done before disabling
7021 * interrupts and handle returning to ring-3 afterwards, but requires very careful state restoration.
7022 */
7023 /** @todo Rework event evaluation and injection to be completely separate. */
7024 if (TRPMHasTrap(pVCpu))
7025 hmR0VmxTrpmTrapToPendingEvent(pVCpu);
7026
7027 rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx);
7028 AssertRCReturn(rc, rc);
7029 return rc;
7030}
7031
7032
7033/**
7034 * Prepares to run guest code in VT-x; at this point we've committed to doing so. This
7035 * means there is no backing out to ring-3 or anywhere else at this
7036 * point.
7037 *
7038 * @param pVM Pointer to the VM.
7039 * @param pVCpu Pointer to the VMCPU.
7040 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7041 * out-of-sync. Make sure to update the required fields
7042 * before using them.
7043 * @param pVmxTransient Pointer to the VMX transient structure.
7044 *
7045 * @remarks Called with preemption disabled.
7046 * @remarks No-long-jump zone!!!
7047 */
7048static void hmR0VmxPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7049{
7050 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
7051 Assert(VMMR0IsLogFlushDisabled(pVCpu));
7052
7053#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
7054 /** @todo I don't see the point of this, VMMR0EntryFast() already disables interrupts for the entire period. */
7055 pVmxTransient->uEFlags = ASMIntDisableFlags();
7056 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
7057#endif
7058
7059 /* Load the required guest state bits (for guest-state changes in the inner execution loop). */
7060 Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT));
7061 Log5(("LoadFlags=%#RX32\n", pVCpu->hm.s.fContextUseFlags));
7062#ifdef HMVMX_SYNC_FULL_GUEST_STATE
7063 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
7064#endif
7065 int rc = VINF_SUCCESS;
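    /* If only the guest RIP has changed since the last VM-entry, write just RIP to the VMCS; otherwise (re)load the full guest state. */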
7066 if (pVCpu->hm.s.fContextUseFlags == HM_CHANGED_GUEST_RIP)
7067 {
7068 rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
7069 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
7070 }
7071 else if (pVCpu->hm.s.fContextUseFlags)
7072 {
7073 rc = hmR0VmxLoadGuestState(pVM, pVCpu, pMixedCtx);
7074 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
7075 }
7076 AssertRC(rc);
7077 AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags =%#x\n", pVCpu->hm.s.fContextUseFlags));
7078
7079#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
7080 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVM, pVCpu, pMixedCtx);
7081 if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
7082 Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
7083#endif
7084
7085 /* Cache the TPR-shadow for checking on every VM-exit if it might have changed. */
7086 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
7087 pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[0x80];
7088
7089 if ( pVmxTransient->fUpdateTscOffsettingAndPreemptTimer
7090 || HMR0GetCurrentCpu()->idCpu != pVCpu->hm.s.idLastCpu)
7091 {
7092 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu, pMixedCtx);
7093 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
7094 }
7095
7096 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB-shootdowns, set this across the world switch. */
7097 hmR0VmxFlushTaggedTlb(pVCpu); /* Invalidate the appropriate guest entries from the TLB. */
7098 Assert(HMR0GetCurrentCpu()->idCpu == pVCpu->hm.s.idLastCpu);
7099
7100 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
7101
7102 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
7103 to start executing. */
7104
7105#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
7106 /*
7107 * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that
7108     * RDTSCPs (that don't cause exits) read the guest MSR. See @bugref{3324}.
7109 */
7110 if ( (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
7111 && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
7112 {
7113 pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
7114 uint64_t u64HostTscAux = 0;
7115 int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64HostTscAux);
7116 AssertRC(rc2);
7117 ASMWrMsr(MSR_K8_TSC_AUX, u64HostTscAux);
7118 }
7119#endif
7120}
7121
7122
7123/**
7124 * Performs some essential restoration of state after running guest code in
7125 * VT-x.
7126 *
7127 * @param pVM Pointer to the VM.
7128 * @param pVCpu Pointer to the VMCPU.
7129 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
7130 * out-of-sync. Make sure to update the required fields
7131 * before using them.
7132 * @param pVmxTransient Pointer to the VMX transient structure.
7133 * @param rcVMRun Return code of VMLAUNCH/VMRESUME.
7134 *
7135 * @remarks Called with interrupts disabled.
7136 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
7137 * unconditionally when it is safe to do so.
7138 */
7139static void hmR0VmxPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, int rcVMRun)
7140{
7141 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
7142
7143 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
7144 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
7145 pVCpu->hm.s.vmx.fUpdatedGuestState = 0; /* Exits/longjmps to ring-3 requires saving the guest state. */
7146 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */
7147 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
7148
7149 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
7150 {
7151#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
7152 /* Restore host's TSC_AUX. */
7153 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
7154 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
7155#endif
7156 /** @todo Find a way to fix hardcoding a guestimate. */
7157 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC()
7158 + pVCpu->hm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);
7159 }
7160
7161 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
7162 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
7163 Assert(!(ASMGetFlags() & X86_EFL_IF));
7164 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
7165
7166 ASMSetFlags(pVmxTransient->uEFlags); /* Enable interrupts. */
7167 pVCpu->hm.s.fResumeVM = true; /* Use VMRESUME instead of VMLAUNCH in the next run. */
7168
7169 /* Save the basic VM-exit reason. Refer Intel spec. 24.9.1 "Basic VM-exit Information". */
7170 uint32_t uExitReason;
7171 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
7172 rc |= hmR0VmxReadEntryIntrInfoVmcs(pVmxTransient);
7173 AssertRC(rc);
7174 pVmxTransient->uExitReason = (uint16_t)VMX_EXIT_REASON_BASIC(uExitReason);
7175 pVmxTransient->fVMEntryFailed = !!VMX_ENTRY_INTERRUPTION_INFO_VALID(pVmxTransient->uEntryIntrInfo);
7176
7177 VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pMixedCtx);
7178 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
7179
7180 /* If the VMLAUNCH/VMRESUME failed, we can bail out early. This does -not- cover VMX_EXIT_ERR_*. */
7181 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
7182 {
7183 Log4(("VM-entry failure: pVCpu=%p idCpu=%RU32 rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", pVCpu, pVCpu->idCpu, rcVMRun,
7184 pVmxTransient->fVMEntryFailed));
7185 return;
7186 }
7187
7188 if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
7189 {
7190 /* Update the guest interruptibility-state from the VMCS. */
7191 hmR0VmxSaveGuestIntrState(pVCpu, pMixedCtx);
7192#if defined(HMVMX_SYNC_FULL_GUEST_STATE) || defined(HMVMX_SAVE_FULL_GUEST_STATE)
7193 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
7194 AssertRC(rc);
7195#endif
        /*
         * If the guest raised the TPR, it does not cause an immediate VM-exit. Instead we sync the TPR lazily on
         * whatever VM-exit we eventually get. This may be expensive as PDMApicSetTPR() can longjmp to ring-3, which
         * is why it's done here: it's easier and no less efficient than making hmR0VmxSaveGuestState() cope with
         * longjmps safely (see VMCPU_FF_HM_UPDATE_CR3 handling).
         */
7202 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
7203 && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[0x80])
7204 {
7205 rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
7206 AssertRC(rc);
7207 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
7208 }
7209 }
7210}
7211
7212
7213/**
7214 * Runs the guest code using VT-x.
7215 *
7216 * @returns VBox status code.
7217 * @param pVM Pointer to the VM.
7218 * @param pVCpu Pointer to the VMCPU.
7219 * @param pCtx Pointer to the guest-CPU context.
7220 *
7221 * @remarks Called with preemption disabled.
7222 */
7223VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
7224{
7225 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7226 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7227
7228 VMXTRANSIENT VmxTransient;
7229 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
7230 int rc = VERR_INTERNAL_ERROR_5;
7231 uint32_t cLoops = 0;
7232
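    /*
     * The main run loop: prepare the guest state, commit it with longjmps to ring-3 disabled, execute the guest,
     * restore the host state, then dispatch the VM-exit handler. We keep looping until a handler returns a status
     * other than VINF_SUCCESS or until cMaxResumeLoops is exceeded.
     */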
7233 for (;; cLoops++)
7234 {
7235 Assert(!HMR0SuspendPending());
7236 AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
7237 ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hm.s.idEnteredCpu,
7238 (unsigned)RTMpCpuId(), cLoops));
7239
7240 /* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
7241 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
7242 rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
7243 if (rc != VINF_SUCCESS)
7244 break;
7245
7246 /*
7247 * No longjmps to ring-3 from this point on!!!
7248 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
7249 * This also disables flushing of the R0-logger instance (if any).
7250 */
7251 VMMRZCallRing3Disable(pVCpu);
7252 VMMRZCallRing3RemoveNotification(pVCpu);
7253 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
7254
7255 rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
7256 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
7257
7258 /*
7259 * Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state.
7260 * This will also re-enable longjmps to ring-3 when it has reached a safe point!!!
7261 */
7262 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
7263 if (RT_UNLIKELY(rc != VINF_SUCCESS)) /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
7264 {
7265 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
7266 hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
7267 return rc;
7268 }
7269
7270 /* Handle the VM-exit. */
7271 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
7272 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
7273 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
7274 HMVMX_START_EXIT_DISPATCH_PROF();
7275#ifdef HMVMX_USE_FUNCTION_TABLE
7276 rc = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
7277#else
7278 rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
7279#endif
7280 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
7281 if (rc != VINF_SUCCESS)
7282 break;
7283 else if (cLoops > pVM->hm.s.cMaxResumeLoops)
7284 {
7285 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
7286 rc = VINF_EM_RAW_INTERRUPT;
7287 break;
7288 }
7289 }
7290
7291 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
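    /* Translate intermediate status codes into ones ring-3 expects before leaving. */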
7292 if (rc == VERR_EM_INTERPRETER)
7293 rc = VINF_EM_RAW_EMULATE_INSTR;
7294 else if (rc == VINF_EM_RESET)
7295 rc = VINF_EM_TRIPLE_FAULT;
7296 hmR0VmxExitToRing3(pVM, pVCpu, pCtx, rc);
7297 return rc;
7298}
7299
7300
7301#ifndef HMVMX_USE_FUNCTION_TABLE
7302DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
7303{
7304 int rc;
7305 switch (rcReason)
7306 {
7307 case VMX_EXIT_EPT_MISCONFIG: rc = hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient); break;
7308 case VMX_EXIT_EPT_VIOLATION: rc = hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient); break;
7309 case VMX_EXIT_IO_INSTR: rc = hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient); break;
7310 case VMX_EXIT_CPUID: rc = hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient); break;
7311 case VMX_EXIT_RDTSC: rc = hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient); break;
7312 case VMX_EXIT_RDTSCP: rc = hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient); break;
7313 case VMX_EXIT_APIC_ACCESS: rc = hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient); break;
7314 case VMX_EXIT_XCPT_OR_NMI: rc = hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient); break;
7315 case VMX_EXIT_MOV_CRX: rc = hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient); break;
7316 case VMX_EXIT_EXT_INT: rc = hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient); break;
7317 case VMX_EXIT_INT_WINDOW: rc = hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient); break;
7318 case VMX_EXIT_MWAIT: rc = hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient); break;
7319 case VMX_EXIT_MONITOR: rc = hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient); break;
7320 case VMX_EXIT_TASK_SWITCH: rc = hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient); break;
7321 case VMX_EXIT_PREEMPT_TIMER: rc = hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient); break;
7322 case VMX_EXIT_RDMSR: rc = hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient); break;
7323 case VMX_EXIT_WRMSR: rc = hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient); break;
7324 case VMX_EXIT_MOV_DRX: rc = hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient); break;
7325 case VMX_EXIT_TPR_BELOW_THRESHOLD: rc = hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient); break;
7326 case VMX_EXIT_HLT: rc = hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient); break;
7327 case VMX_EXIT_INVD: rc = hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient); break;
7328 case VMX_EXIT_INVLPG: rc = hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient); break;
7329 case VMX_EXIT_RSM: rc = hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient); break;
7330 case VMX_EXIT_MTF: rc = hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient); break;
7331 case VMX_EXIT_PAUSE: rc = hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient); break;
7332 case VMX_EXIT_XDTR_ACCESS: rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); break;
7333 case VMX_EXIT_TR_ACCESS: rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); break;
7334 case VMX_EXIT_WBINVD: rc = hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient); break;
7335 case VMX_EXIT_XSETBV: rc = hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient); break;
7336 case VMX_EXIT_RDRAND: rc = hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient); break;
7337 case VMX_EXIT_INVPCID: rc = hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient); break;
7338 case VMX_EXIT_GETSEC: rc = hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient); break;
7339 case VMX_EXIT_RDPMC: rc = hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient); break;
7340
7341 case VMX_EXIT_TRIPLE_FAULT: rc = hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient); break;
7342 case VMX_EXIT_NMI_WINDOW: rc = hmR0VmxExitNmiWindow(pVCpu, pMixedCtx, pVmxTransient); break;
7343 case VMX_EXIT_INIT_SIGNAL: rc = hmR0VmxExitInitSignal(pVCpu, pMixedCtx, pVmxTransient); break;
7344 case VMX_EXIT_SIPI: rc = hmR0VmxExitSipi(pVCpu, pMixedCtx, pVmxTransient); break;
7345 case VMX_EXIT_IO_SMI: rc = hmR0VmxExitIoSmi(pVCpu, pMixedCtx, pVmxTransient); break;
7346 case VMX_EXIT_SMI: rc = hmR0VmxExitSmi(pVCpu, pMixedCtx, pVmxTransient); break;
7347 case VMX_EXIT_ERR_MSR_LOAD: rc = hmR0VmxExitErrMsrLoad(pVCpu, pMixedCtx, pVmxTransient); break;
7348 case VMX_EXIT_ERR_INVALID_GUEST_STATE: rc = hmR0VmxExitErrInvalidGuestState(pVCpu, pMixedCtx, pVmxTransient); break;
7349 case VMX_EXIT_ERR_MACHINE_CHECK: rc = hmR0VmxExitErrMachineCheck(pVCpu, pMixedCtx, pVmxTransient); break;
7350
7351 case VMX_EXIT_VMCALL:
7352 case VMX_EXIT_VMCLEAR:
7353 case VMX_EXIT_VMLAUNCH:
7354 case VMX_EXIT_VMPTRLD:
7355 case VMX_EXIT_VMPTRST:
7356 case VMX_EXIT_VMREAD:
7357 case VMX_EXIT_VMRESUME:
7358 case VMX_EXIT_VMWRITE:
7359 case VMX_EXIT_VMXOFF:
7360 case VMX_EXIT_VMXON:
7361 case VMX_EXIT_INVEPT:
7362 case VMX_EXIT_INVVPID:
7363 case VMX_EXIT_VMFUNC:
7364 rc = hmR0VmxExitSetPendingXcptUD(pVCpu, pMixedCtx, pVmxTransient);
7365 break;
7366 default:
7367 rc = hmR0VmxExitErrUndefined(pVCpu, pMixedCtx, pVmxTransient);
7368 break;
7369 }
7370 return rc;
7371}
7372#endif
7373
7374#ifdef DEBUG
/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
7376# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
7377 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
7378
7379# define HMVMX_ASSERT_PREEMPT_CPUID() \
7380 do \
7381 { \
7382 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
7383 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
7384 } while (0)
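/* Asserts that we have not been rescheduled onto a different host CPU since
   HMVMX_ASSERT_PREEMPT_CPUID_VAR() captured the current CPU id. */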
7385
7386# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \
7387 do { \
7388 AssertPtr(pVCpu); \
7389 AssertPtr(pMixedCtx); \
7390 AssertPtr(pVmxTransient); \
7391 Assert(pVmxTransient->fVMEntryFailed == false); \
7392 Assert(ASMIntAreEnabled()); \
7393 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
7394 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
7395 Log4Func(("vcpu[%RU32] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v\n", pVCpu->idCpu)); \
7396 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
7397 if (VMMR0IsLogFlushDisabled(pVCpu)) \
7398 HMVMX_ASSERT_PREEMPT_CPUID(); \
7399 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
7400 } while (0)
7401
7402# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() \
7403 do { \
7404 Log4Func(("\n")); \
7405 } while(0)
7406#else /* Release builds */
7407# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() do { HMVMX_STOP_EXIT_DISPATCH_PROF(); } while(0)
7408# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while(0)
7409#endif
7410
7411
7412/**
7413 * Advances the guest RIP after reading it from the VMCS.
7414 *
7415 * @returns VBox status code.
7416 * @param pVCpu Pointer to the VMCPU.
 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
 *                          out-of-sync. Make sure to update the required fields
 *                          before using them.
7420 * @param pVmxTransient Pointer to the VMX transient structure.
7421 *
7422 * @remarks No-long-jump zone!!!
7423 */
7424DECLINLINE(int) hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7425{
7426 int rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7427 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
7428 AssertRCReturn(rc, rc);
7429
7430 pMixedCtx->rip += pVmxTransient->cbInstr;
7431 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
7432 return rc;
7433}
7434
7435
7436/**
7437 * Tries to determine what part of the guest-state VT-x has deemed as invalid
7438 * and update error record fields accordingly.
7439 *
7440 * @return VMX_IGS_* return codes.
7441 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
7442 * wrong with the guest state.
7443 *
7444 * @param pVM Pointer to the VM.
7445 * @param pVCpu Pointer to the VMCPU.
7446 * @param pCtx Pointer to the guest-CPU state.
7447 */
7448static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
7449{
7450#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
7451#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { \
7452 uError = (err); \
7453 break; \
7454 } else do {} while (0)
7455/* Duplicate of IEM_IS_CANONICAL(). */
7456#define HMVMX_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
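/* A 64-bit address is canonical when bits 63:47 are all equal (a sign-extension of bit 47). Adding 2^47 with
   64-bit wrap-around maps every canonical address into [0, 2^48) and every non-canonical one outside it, so a
   single unsigned compare suffices. */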
7457
7458 int rc;
7459 uint64_t u64Val;
7460 uint32_t u32Val;
7461 uint32_t uError = VMX_IGS_ERROR;
7462 bool fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuest;
7463
7464 do
7465 {
7466 /*
7467 * CR0.
7468 */
7469 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 & pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
7470 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 | pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
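        /* A CR0 bit must be 1 if it is set in both fixed0 and fixed1 (uSetCR0) and must be 0 if it is clear in
           both (i.e. not set in uZapCR0). See Intel spec. Appendix A.7 "VMX-Fixed Bits in CR0". */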
        /* Exceptions for unrestricted guests for fixed CR0 bits (PE, PG).
           See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs". */
7473 if (fUnrestrictedGuest)
7474 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
7475
7476 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val);
7477 AssertRCBreak(rc);
7478 HMVMX_CHECK_BREAK((u32Val & uSetCR0) == uSetCR0, VMX_IGS_CR0_FIXED1);
7479 HMVMX_CHECK_BREAK(!(u32Val & ~uZapCR0), VMX_IGS_CR0_FIXED0);
7480 if ( !fUnrestrictedGuest
7481 && (u32Val & X86_CR0_PG)
7482 && !(u32Val & X86_CR0_PE))
7483 {
7484 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
7485 }
7486
7487 /*
7488 * CR4.
7489 */
7490 uint64_t uSetCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 & pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
7491 uint64_t uZapCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 | pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
7492 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32Val);
7493 AssertRCBreak(rc);
7494 HMVMX_CHECK_BREAK((u32Val & uSetCR4) == uSetCR4, VMX_IGS_CR4_FIXED1);
7495 HMVMX_CHECK_BREAK(!(u32Val & ~uZapCR4), VMX_IGS_CR4_FIXED0);
7496
7497 /*
7498 * IA32_DEBUGCTL MSR.
7499 */
7500 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
7501 AssertRCBreak(rc);
7502 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
7503 && (u64Val & 0xfffffe3c)) /* Bits 31-9, bits 2-5 MBZ. */
7504 {
7505 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
7506 }
7507 uint64_t u64DebugCtlMsr = u64Val;
7508
7509#ifdef VBOX_STRICT
7510 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
7511 AssertRCBreak(rc);
7512 Assert(u32Val == pVCpu->hm.s.vmx.u32ProcCtls);
7513#endif
        /* Note: "IA32e-mode guest" is a VM-entry control, so check it against the entry controls. */
        const bool fLongModeGuest = !!(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST);
7515
7516 /*
7517 * RIP and RFLAGS.
7518 */
7519 uint32_t u32EFlags;
7520#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
7521 if (HMVMX_IS_64BIT_HOST_MODE())
7522 {
7523 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64Val);
7524 AssertRCBreak(rc);
7525 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
7526 if ( !fLongModeGuest
7527 || !pCtx->cs.Attr.n.u1Long)
7528 {
7529 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
7530 }
7531 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
7532 * must be identical if the "IA32e mode guest" VM-entry control is 1
7533 * and CS.L is 1. No check applies if the CPU supports 64
7534 * linear-address bits. */
7535
7536 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
7537 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RFLAGS, &u64Val);
7538 AssertRCBreak(rc);
7539 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
7540 VMX_IGS_RFLAGS_RESERVED);
7541 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
7542 u32EFlags = u64Val;
7543 }
7544 else
7545#endif
7546 {
7547 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32EFlags);
7548 AssertRCBreak(rc);
7549 HMVMX_CHECK_BREAK(!(u32EFlags & 0xffc08028), VMX_IGS_RFLAGS_RESERVED); /* Bit 31:22, Bit 15, 5, 3 MBZ. */
7550 HMVMX_CHECK_BREAK((u32EFlags & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
7551 }
7552
7553 if ( fLongModeGuest
7554 || !(pCtx->cr0 & X86_CR0_PE))
7555 {
7556 HMVMX_CHECK_BREAK(!(u32EFlags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
7557 }
7558
7559 uint32_t u32EntryInfo;
7560 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
7561 AssertRCBreak(rc);
7562 if ( VMX_ENTRY_INTERRUPTION_INFO_VALID(u32EntryInfo)
7563 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
7564 {
            HMVMX_CHECK_BREAK(u32EFlags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
7566 }
7567
7568 /*
7569 * 64-bit checks.
7570 */
7571#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
7572 if (HMVMX_IS_64BIT_HOST_MODE())
7573 {
7574 if ( fLongModeGuest
7575 && !fUnrestrictedGuest)
7576 {
7577 HMVMX_CHECK_BREAK(CPUMIsGuestPagingEnabledEx(pCtx), VMX_IGS_CR0_PG_LONGMODE);
7578 HMVMX_CHECK_BREAK((pCtx->cr4 & X86_CR4_PAE), VMX_IGS_CR4_PAE_LONGMODE);
7579 }
7580
7581 if ( !fLongModeGuest
7582 && (pCtx->cr4 & X86_CR4_PCIDE))
7583 {
7584 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
7585 }
7586
7587 /** @todo CR3 field must be such that bits 63:52 and bits in the range
7588 * 51:32 beyond the processor's physical-address width are 0. */
7589
7590 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
7591 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
7592 {
7593 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
7594 }
7595
7596 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
7597 AssertRCBreak(rc);
7598 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
7599
7600 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
7601 AssertRCBreak(rc);
7602 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
7603 }
7604#endif
7605
7606 /*
7607 * PERF_GLOBAL MSR.
7608 */
7609 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR)
7610 {
7611 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
7612 AssertRCBreak(rc);
7613 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
7614 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63-35, bits 31-2 MBZ. */
7615 }
7616
7617 /*
7618 * PAT MSR.
7619 */
7620 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR)
7621 {
7622 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
7623 AssertRCBreak(rc);
            HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xf8f8f8f8f8f8f8f8)), VMX_IGS_PAT_MSR_RESERVED); /* Bits 7:3 of each field MBZ. */
            for (unsigned i = 0; i < 8; i++)
            {
                uint8_t u8Val = (u64Val & 0xff);
                if (   u8Val != 0 /* UC */
                    && u8Val != 1 /* WC */
                    && u8Val != 4 /* WT */
                    && u8Val != 5 /* WP */
                    && u8Val != 6 /* WB */
                    && u8Val != 7 /* UC- */)
                {
                    HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
                }
                u64Val >>= 8;
7638 }
7639 }
7640
7641 /*
7642 * EFER MSR.
7643 */
7644 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
7645 {
7646 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
7647 AssertRCBreak(rc);
7648 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
7649 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63-12, bit 9, bits 7-1 MBZ. */
            /* Compare EFER.LMA and the IA32e-mode entry control as booleans; the raw bits sit at different positions. */
            HMVMX_CHECK_BREAK(   RT_BOOL(u64Val & MSR_K6_EFER_LMA)
                              == RT_BOOL(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST),
                              VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
            HMVMX_CHECK_BREAK(   fUnrestrictedGuest
                              || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(pCtx->cr0 & X86_CR0_PG),
                              VMX_IGS_EFER_LMA_PG_MISMATCH);
7654 }
7655
7656 /*
7657 * Segment registers.
7658 */
7659 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
7660 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
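        /* Protected-mode and long-mode segment checks; virtual-8086 mode is handled in the else branch below. */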
7661 if (!(u32EFlags & X86_EFL_VM))
7662 {
7663 /* CS */
7664 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
7665 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
7666 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
7667 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
7668 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
7669 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
7670 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
7671 /* CS cannot be loaded with NULL in protected mode. */
7672 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
7673 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
7674 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
7675 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
7676 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
7677 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
7678 else if (pVM->hm.s.vmx.fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
7679 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
7680 else
7681 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
7682
7683 /* SS */
7684 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
7685 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
7686 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
7687 if ( !(pCtx->cr0 & X86_CR0_PE)
7688 || pCtx->cs.Attr.n.u4Type == 3)
7689 {
7690 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
7691 }
7692 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
7693 {
7694 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
7695 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
7696 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
7697 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
7698 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
7699 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
7700 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
7701 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
7702 }
7703
7704 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
7705 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
7706 {
7707 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
7708 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
7709 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
7710 || pCtx->ds.Attr.n.u4Type > 11
7711 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
7712 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
7713 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
7714 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
7715 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
7716 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
7717 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
7718 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
7719 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
7720 }
7721 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
7722 {
7723 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
7724 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
7725 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
7726 || pCtx->es.Attr.n.u4Type > 11
7727 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
7728 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
7729 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
7730 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
7731 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
7732 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
7733 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
7734 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
7735 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
7736 }
7737 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
7738 {
7739 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
7740 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
7741 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
7742 || pCtx->fs.Attr.n.u4Type > 11
7743 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
7744 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
7745 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
7746 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
7747 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
7748 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
7749 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
7750 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
7751 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
7752 }
7753 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
7754 {
7755 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
7756 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
7757 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
7758 || pCtx->gs.Attr.n.u4Type > 11
7759 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
7760 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
7761 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
7762 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
7763 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
7764 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
7765 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
7766 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
7767 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
7768 }
7769 /* 64-bit capable CPUs. */
7770#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
7771 if (HMVMX_IS_64BIT_HOST_MODE())
7772 {
7773 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
7774 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
7775 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
7776 || HMVMX_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
7777 HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
7778 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
7779 VMX_IGS_LONGMODE_SS_BASE_INVALID);
7780 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
7781 VMX_IGS_LONGMODE_DS_BASE_INVALID);
7782 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
7783 VMX_IGS_LONGMODE_ES_BASE_INVALID);
7784 }
7785#endif
7786 }
7787 else
7788 {
7789 /* V86 mode checks. */
7790 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
7791 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
7792 {
7793 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
7794 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
7795 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
7796 }
7797 else
7798 {
7799 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
7800 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
7801 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
7802 }
7803
7804 /* CS */
7805 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
7806 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
7807 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
7808 /* SS */
7809 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
7810 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
7811 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
7812 /* DS */
7813 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
7814 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
7815 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
7816 /* ES */
7817 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
7818 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
7819 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
7820 /* FS */
7821 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
7822 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
7823 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
7824 /* GS */
7825 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
7826 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
7827 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
7828 /* 64-bit capable CPUs. */
7829#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
7830 if (HMVMX_IS_64BIT_HOST_MODE())
7831 {
7832 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
7833 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
7834 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
7835 || HMVMX_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
7836 HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
7837 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
7838 VMX_IGS_LONGMODE_SS_BASE_INVALID);
7839 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
7840 VMX_IGS_LONGMODE_DS_BASE_INVALID);
7841 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
7842 VMX_IGS_LONGMODE_ES_BASE_INVALID);
7843 }
7844#endif
7845 }
7846
7847 /*
7848 * TR.
7849 */
7850 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
7851 /* 64-bit capable CPUs. */
7852#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
7853 if (HMVMX_IS_64BIT_HOST_MODE())
7854 {
7855 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
7856 }
7857#endif
7858 if (fLongModeGuest)
7859 {
7860 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
7861 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
7862 }
7863 else
7864 {
7865 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
7866 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
7867 VMX_IGS_TR_ATTR_TYPE_INVALID);
7868 }
7869 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
7870 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
7871 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11-8 MBZ. */
7872 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
7873 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
7874 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
7875 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
7876 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
7877
7878 /*
7879 * GDTR and IDTR.
7880 */
7881#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
7882 if (HMVMX_IS_64BIT_HOST_MODE())
7883 {
7884 rc = VMXReadVmcs64(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
7885 AssertRCBreak(rc);
7886 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
7887
7888 rc = VMXReadVmcs64(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
7889 AssertRCBreak(rc);
7890 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
7891 }
7892#endif
7893
7894 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
7895 AssertRCBreak(rc);
7896 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
7897
7898 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
7899 AssertRCBreak(rc);
7900 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
7901
7902 /*
7903 * Guest Non-Register State.
7904 */
7905 /* Activity State. */
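        /* The activity state must be ACTIVE or one advertised as supported in the VMX misc. MSR; the HLT state
           additionally requires SS.DPL to be 0. */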
7906 uint32_t u32ActivityState;
7907 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
7908 AssertRCBreak(rc);
7909 HMVMX_CHECK_BREAK( !u32ActivityState
7910 || (u32ActivityState & MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hm.s.vmx.msr.vmx_misc)),
7911 VMX_IGS_ACTIVITY_STATE_INVALID);
7912 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
7913 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
7914 uint32_t u32IntrState;
7915 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &u32IntrState);
7916 AssertRCBreak(rc);
7917 if ( u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS
7918 || u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
7919 {
7920 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
7921 }
7922
        /** @todo Activity state and injecting interrupts. Left as a todo since we
         *        currently don't use any activity state other than ACTIVE. */
7925
7926 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
7927 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
7928
7929 /* Guest interruptibility-state. */
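        /* STI blocking and MOV-SS blocking cannot both be set, and STI blocking requires RFLAGS.IF to be set. */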
7930 HMVMX_CHECK_BREAK(!(u32IntrState & 0xfffffff0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
7931 HMVMX_CHECK_BREAK((u32IntrState & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
7932 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS))
7933 != ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
7934 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
7935 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
7936 HMVMX_CHECK_BREAK( (u32EFlags & X86_EFL_IF)
7937 || !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI),
7938 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
7939 if (VMX_ENTRY_INTERRUPTION_INFO_VALID(u32EntryInfo))
7940 {
7941 if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
7942 {
7943 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
7944 && !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
7945 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
7946 }
7947 else if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
7948 {
7949 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
7950 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
7951 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI),
7952 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
7953 }
7954 }
7955 /** @todo Assumes the processor is not in SMM. */
7956 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI),
7957 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
7958 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
7959 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI),
7960 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
7961 if ( (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
7962 && VMX_ENTRY_INTERRUPTION_INFO_VALID(u32EntryInfo)
7963 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
7964 {
7965 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI),
7966 VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
7967 }
7968
7969 /* Pending debug exceptions. */
7970 if (HMVMX_IS_64BIT_HOST_MODE())
7971 {
7972 rc = VMXReadVmcs64(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u64Val);
7973 AssertRCBreak(rc);
7974 /* Bits 63-15, Bit 13, Bits 11-4 MBZ. */
7975 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
7976 u32Val = u64Val; /* For pending debug exceptions checks below. */
7977 }
7978 else
7979 {
7980 rc = VMXReadVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u32Val);
7981 AssertRCBreak(rc);
7982 /* Bits 31-15, Bit 13, Bits 11-4 MBZ. */
            HMVMX_CHECK_BREAK(!(u32Val & 0xffffaff0), VMX_IGS_PENDING_DEBUG_RESERVED);
7984 }
7985
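        /* With STI/MOV-SS blocking or the HLT activity state, the BS bit of the pending debug exceptions field
           must be consistent with RFLAGS.TF and IA32_DEBUGCTL.BTF: set when single-stepping is in effect
           (TF=1, BTF=0), clear otherwise. */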
7986 if ( (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
7987 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS)
7988 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
7989 {
7990 if ( (u32EFlags & X86_EFL_TF)
7991 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
7992 {
7993 /* Bit 14 is PendingDebug.BS. */
7994 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
7995 }
7996 if ( !(u32EFlags & X86_EFL_TF)
7997 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
7998 {
7999 /* Bit 14 is PendingDebug.BS. */
8000 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
8001 }
8002 }
8003
8004 /* VMCS link pointer. */
8005 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
8006 AssertRCBreak(rc);
8007 if (u64Val != UINT64_C(0xffffffffffffffff))
8008 {
8009 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
8010 /** @todo Bits beyond the processor's physical-address width MBZ. */
8011 /** @todo 32-bit located in memory referenced by value of this field (as a
8012 * physical address) must contain the processor's VMCS revision ID. */
8013 /** @todo SMM checks. */
8014 }
8015
8016 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries. */
8017
8018 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
8019 if (uError == VMX_IGS_ERROR)
8020 uError = VMX_IGS_REASON_NOT_FOUND;
8021 } while (0);
8022
8023 pVCpu->hm.s.u32HMError = uError;
8024 return uError;
8025
8026#undef HMVMX_ERROR_BREAK
8027#undef HMVMX_CHECK_BREAK
8028#undef HMVMX_IS_CANONICAL
8029}
8030
8031/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
8032/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
8033/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
8034
8035/** @name VM-exit handlers.
8036 * @{
8037 */
8038
8039/**
8040 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
8041 */
8042HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8043{
8044 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8045 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
    /* 32-bit Windows hosts (4 cores) have trouble with this; it causes higher interrupt latency. */
8047#if HC_ARCH_BITS == 64 && defined(VBOX_WITH_VMMR0_DISABLE_PREEMPTION)
8048 Assert(ASMIntAreEnabled());
8049 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUsePreemptTimer)
8050 return VINF_SUCCESS;
8051#endif
8052 return VINF_EM_RAW_INTERRUPT;
8053}
8054
8055
8056/**
8057 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
8058 */
8059HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8060{
8061 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8062 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
8063
8064 int rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8065 AssertRCReturn(rc, rc);
8066
8067 uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntrInfo);
8068 Assert( !(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT)
8069 && uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
8070 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntrInfo));
8071
8072 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
8073 {
8074 /*
8075 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we injected it ourselves and
8076 * anything we inject is not going to cause a VM-exit directly for the event being injected.
8077 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
8078 *
8079 * Dispatch the NMI to the host. See Intel spec. 27.5.5 "Updating Non-Register State".
8080 */
8081 VMXDispatchHostNmi();
8082 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmi);
8083 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
8084 return VINF_SUCCESS;
8085 }
8086
8087 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
8088 rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
8089 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
8090 {
8091 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
8092 return VINF_SUCCESS;
8093 }
8094 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
8095 {
8096 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
8097 return rc;
8098 }
8099
8100 uint32_t uExitIntrInfo = pVmxTransient->uExitIntrInfo;
8101 uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntrInfo);
8102 switch (uIntrType)
8103 {
8104 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT: /* Software exception. (#BP or #OF) */
8105 Assert(uVector == X86_XCPT_DB || uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
8106 /* no break */
8107 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT:
8108 {
8109 switch (uVector)
8110 {
8111 case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pMixedCtx, pVmxTransient); break;
8112 case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pMixedCtx, pVmxTransient); break;
8113 case X86_XCPT_NM: rc = hmR0VmxExitXcptNM(pVCpu, pMixedCtx, pVmxTransient); break;
8114 case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pMixedCtx, pVmxTransient); break;
8115 case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pMixedCtx, pVmxTransient); break;
8116 case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pMixedCtx, pVmxTransient); break;
8117#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
8118 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
8119 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
8120 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
8121 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
8122 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
8123 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
8124 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
8125 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
8126 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
8127 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
8128#endif
8129 default:
8130 {
8131 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8132 AssertRCReturn(rc, rc);
8133
8134 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
8135 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
8136 {
8137 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
8138 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
8139 rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8140 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8141 AssertRCReturn(rc, rc);
8142 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntrInfo),
8143 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode,
8144 0 /* GCPtrFaultAddress */);
8145 AssertRCReturn(rc, rc);
8146 }
8147 else
8148 {
8149 AssertMsgFailed(("Unexpected VM-exit caused by exception %#x\n", uVector));
8150 pVCpu->hm.s.u32HMError = uVector;
8151 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
8152 }
8153 break;
8154 }
8155 }
8156 break;
8157 }
8158
8159 default:
8160 {
8161 pVCpu->hm.s.u32HMError = uExitIntrInfo;
8162 rc = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_CODE;
8163 AssertMsgFailed(("Unexpected interruption code %#x\n", VMX_EXIT_INTERRUPTION_INFO_TYPE(uExitIntrInfo)));
8164 break;
8165 }
8166 }
8167 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
8168 return rc;
8169}
8170
8171
8172/**
8173 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
8174 */
8175HMVMX_EXIT_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8176{
8177 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8178
8179 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, it is now ready. */
8180 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
8181 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
8182 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
8183 AssertRCReturn(rc, rc);
8184
8185 /* Deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectEvent() and resume guest execution. */
8186 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
8187 return VINF_SUCCESS;
8188}
8189
8190
8191/**
8192 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
8193 */
8194HMVMX_EXIT_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8195{
8196 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8197 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
8198 pVCpu->hm.s.u32HMError = VMX_EXIT_NMI_WINDOW;
8199 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8200}
8201
8202
8203/**
8204 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
8205 */
8206HMVMX_EXIT_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8207{
8208 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8209 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
8210 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8211}
8212
8213
8214/**
8215 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
8216 */
8217HMVMX_EXIT_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8218{
8219 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8220 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
8221 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8222}
8223
8224
8225/**
8226 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
8227 */
8228HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8229{
8230 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8231 PVM pVM = pVCpu->CTX_SUFF(pVM);
8232 int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
8233 if (RT_LIKELY(rc == VINF_SUCCESS))
8234 {
8235 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8236 Assert(pVmxTransient->cbInstr == 2);
8237 }
8238 else
8239 {
8240 AssertMsgFailed(("hmR0VmxExitCpuid: EMInterpretCpuId failed with %Rrc\n", rc));
8241 rc = VERR_EM_INTERPRETER;
8242 }
8243 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
8244 return rc;
8245}
8246
8247
8248/**
8249 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
8250 */
8251HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8252{
8253 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8254 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
8255 AssertRCReturn(rc, rc);
8256
8257 if (pMixedCtx->cr4 & X86_CR4_SMXE)
8258 return VINF_EM_RAW_EMULATE_INSTR;
8259
8260 AssertMsgFailed(("hmR0VmxExitGetsec: unexpected VM-exit when CR4.SMXE is 0.\n"));
8261 pVCpu->hm.s.u32HMError = VMX_EXIT_GETSEC;
8262 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8263}
8264
8265
8266/**
8267 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
8268 */
8269HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8270{
8271 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8272 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
8273 AssertRCReturn(rc, rc);
8274
8275 PVM pVM = pVCpu->CTX_SUFF(pVM);
8276 rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
8277 if (RT_LIKELY(rc == VINF_SUCCESS))
8278 {
8279 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8280 Assert(pVmxTransient->cbInstr == 2);
8281 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
8282 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
8283 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
8284 }
8285 else
8286 {
8287 AssertMsgFailed(("hmR0VmxExitRdtsc: EMInterpretRdtsc failed with %Rrc\n", rc));
8288 rc = VERR_EM_INTERPRETER;
8289 }
8290 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
8291 return rc;
8292}
8293
8294
8295/**
8296 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
8297 */
8298HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8299{
8300 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8301 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
8302 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); /* For MSR_K8_TSC_AUX */
8303 AssertRCReturn(rc, rc);
8304
8305 PVM pVM = pVCpu->CTX_SUFF(pVM);
8306 rc = EMInterpretRdtscp(pVM, pVCpu, pMixedCtx);
8307 if (RT_LIKELY(rc == VINF_SUCCESS))
8308 {
8309 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8310 Assert(pVmxTransient->cbInstr == 3);
8311 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
8312 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
8313 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
8314 }
8315 else
8316 {
8317 AssertMsgFailed(("hmR0VmxExitRdtscp: EMInterpretRdtscp failed with %Rrc\n", rc));
8318 rc = VERR_EM_INTERPRETER;
8319 }
8320 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
8321 return rc;
8322}
8323
8324
8325/**
8326 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
8327 */
8328HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8329{
8330 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8331 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
8332 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); /** @todo review if CR0 is really required by EM. */
8333 AssertRCReturn(rc, rc);
8334
8335 PVM pVM = pVCpu->CTX_SUFF(pVM);
8336 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
8337 if (RT_LIKELY(rc == VINF_SUCCESS))
8338 {
8339 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8340 Assert(pVmxTransient->cbInstr == 2);
8341 }
8342 else
8343 {
8344 AssertMsgFailed(("hmR0VmxExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
8345 rc = VERR_EM_INTERPRETER;
8346 }
8347 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
8348 return rc;
8349}
8350
8351
8352/**
8353 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
8354 */
8355HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8356{
8357 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8358 PVM pVM = pVCpu->CTX_SUFF(pVM);
8359 Assert(!pVM->hm.s.fNestedPaging);
8360
8361 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8362 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
8363 AssertRCReturn(rc, rc);
8364
8365 VBOXSTRICTRC rc2 = EMInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), pVmxTransient->uExitQualification);
8366 rc = VBOXSTRICTRC_VAL(rc2);
8367 if (RT_LIKELY(rc == VINF_SUCCESS))
8368 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8369 else
8370 {
8371 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %#RX64 failed with %Rrc\n",
8372 pVmxTransient->uExitQualification, rc));
8373 }
8374 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
8375 return rc;
8376}
8377
8378
8379/**
8380 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
8381 */
8382HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8383{
8384 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8385 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8386 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
8387 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8388 AssertRCReturn(rc, rc);
8389
8390 PVM pVM = pVCpu->CTX_SUFF(pVM);
8391 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
8392 if (RT_LIKELY(rc == VINF_SUCCESS))
8393 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8394 else
8395 {
8396 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
8397 rc = VERR_EM_INTERPRETER;
8398 }
8399 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
8400 return rc;
8401}
8402
8403
8404/**
8405 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
8406 */
8407HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8408{
8409 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8410 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8411 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
8412 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8413 AssertRCReturn(rc, rc);
8414
8415 PVM pVM = pVCpu->CTX_SUFF(pVM);
8416 VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
8417 rc = VBOXSTRICTRC_VAL(rc2);
8418 if (RT_LIKELY( rc == VINF_SUCCESS
8419 || rc == VINF_EM_HALT))
8420 {
8421 int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8422 AssertRCReturn(rc3, rc3);
8423
8424 if ( rc == VINF_EM_HALT
8425 && EMShouldContinueAfterHalt(pVCpu, pMixedCtx))
8426 {
8427 rc = VINF_SUCCESS;
8428 }
8429 }
8430 else
8431 {
8432 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
8433 rc = VERR_EM_INTERPRETER;
8434 }
8435 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
8436 ("hmR0VmxExitMwait: failed, invalid error code %Rrc\n", rc));
8437 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
8438 return rc;
8439}
8440
8441
8442/**
8443 * VM-exit handler for RSM (VMX_EXIT_RSM). Unconditional VM-exit.
8444 */
8445HMVMX_EXIT_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8446{
8447 /*
8448 * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root mode. In theory, we should never
8449 * get this VM-exit. This can happen only if dual-monitor treatment of SMI and VMX is enabled, which can (only?) be done by
8450 * executing VMCALL in VMX root operation. If we get here, something funny is going on.
8451 * See Intel spec. "33.15.5 Enabling the Dual-Monitor Treatment".
8452 */
8453 AssertMsgFailed(("Unexpected RSM VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
8454 pVCpu->hm.s.u32HMError = VMX_EXIT_RSM;
8455 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8456}
8457
8458
8459/**
8460 * VM-exit handler for SMI (VMX_EXIT_SMI). Unconditional VM-exit.
8461 */
8462HMVMX_EXIT_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8463{
8464 /*
8465 * This can only happen if we support dual-monitor treatment of SMI, which can be activated by executing VMCALL in VMX
8466 * root operation. Only an STM (SMM transfer monitor) would get this exit when we (the executive monitor) execute a VMCALL
8467 * in VMX root mode or receive an SMI. If we get here, something funny is going on.
8468 * See Intel spec. "33.15.6 Activating the Dual-Monitor Treatment" and Intel spec. 25.3 "Other Causes of VM-Exits"
8469 */
8470 AssertMsgFailed(("Unexpected SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
8471 pVCpu->hm.s.u32HMError = VMX_EXIT_SMI;
8472 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8473}
8474
8475
8476/**
8477 * VM-exit handler for IO SMI (VMX_EXIT_IO_SMI). Unconditional VM-exit.
8478 */
8479HMVMX_EXIT_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8480{
8481 /* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */
8482 AssertMsgFailed(("Unexpected IO SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
8483 pVCpu->hm.s.u32HMError = VMX_EXIT_IO_SMI;
8484 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8485}
8486
8487
8488/**
8489 * VM-exit handler for SIPI (VMX_EXIT_SIPI). Conditional VM-exit.
8490 */
8491HMVMX_EXIT_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8492{
8493 /*
8494 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used. We currently
8495 * don't make use of it (see hmR0VmxLoadGuestActivityState()) as our guests don't have direct access to the host LAPIC.
8496 * See Intel spec. 25.3 "Other Causes of VM-exits".
8497 */
8498 AssertMsgFailed(("Unexpected SIPI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
8499 pVCpu->hm.s.u32HMError = VMX_EXIT_SIPI;
8500 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8501}
8502
8503
8504/**
8505 * VM-exit handler for INIT signal (VMX_EXIT_INIT_SIGNAL). Unconditional
8506 * VM-exit.
8507 */
8508HMVMX_EXIT_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8509{
8510 /*
8511 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
8512 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery" and Intel spec. 29.3 "VMX Instructions" for "VMXON".
8513 *
8514 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these VM-exits.
8515 * See Intel spec. "23.8 Restrictions on VMX operation".
8516 */
8517 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8518 return VINF_SUCCESS;
8519}
8520
8521
8522/**
8523 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
8524 * VM-exit.
8525 */
8526HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8527{
8528 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8529 return VINF_EM_RESET;
8530}
8531
8532
8533/**
8534 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
8535 */
8536HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8537{
8538 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8539 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
8540 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
8541 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
8542 AssertRCReturn(rc, rc);
8543
8544 pMixedCtx->rip++;
8545 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
8546 if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx)) /* Requires eflags. */
8547 rc = VINF_SUCCESS;
8548 else
8549 rc = VINF_EM_HALT;
8550
8551 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
8552 return rc;
8553}
8554
8555
8556/**
8557 * VM-exit handler for instructions that result in a #UD exception delivered to
8558 * the guest.
8559 */
8560HMVMX_EXIT_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8561{
8562 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8563 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
8564 return VINF_SUCCESS;
8565}
8566
8567
8568/**
8569 * VM-exit handler for expiry of the VMX preemption timer.
8570 */
8571HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8572{
8573 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8574
8575 /* If the preemption-timer has expired, reinitialize the preemption timer on next VM-entry. */
8576 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
8577
8578 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
8579 PVM pVM = pVCpu->CTX_SUFF(pVM);
8580 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
8581 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
8582 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
8583}
8584
8585
8586/**
8587 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
8588 */
8589HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8590{
8591 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8592
8593 /* We expose XSETBV to the guest; fall back to the recompiler for emulation. */
8594 /** @todo check if XSETBV is supported by the recompiler. */
8595 return VERR_EM_INTERPRETER;
8596}
8597
8598
8599/**
8600 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
8601 */
8602HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8603{
8604 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8605
8606 /* The guest should not invalidate the host CPU's TLBs; fall back to the recompiler. */
8607 /** @todo implement EMInterpretInvpcid() */
8608 return VERR_EM_INTERPRETER;
8609}
8610
8611
8612/**
8613 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE).
8614 * Error VM-exit.
8615 */
8616HMVMX_EXIT_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8617{
8618 uint32_t uIntrState;
8619 HMVMXHCUINTREG uHCReg;
8620 uint64_t u64Val;
8621 uint32_t u32Val;
8622
8623 int rc = hmR0VmxReadEntryIntrInfoVmcs(pVmxTransient);
8624 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
8625 rc |= hmR0VmxReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
8626 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
8627 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8628 AssertRCReturn(rc, rc);
8629
8630 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
8631 NOREF(uInvalidReason);
8632
8633 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntrInfo));
8634 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
8635 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
8636 Log4(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", uIntrState));
8637
8638 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); AssertRC(rc);
8639 Log4(("VMX_VMCS_GUEST_CR0 %#RX32\n", u32Val));
8640 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
8641 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
8642 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
8643 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
8644 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
8645 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
8646 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
8647 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
8648 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
8649 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
8650
8651 PVM pVM = pVCpu->CTX_SUFF(pVM);
8652 HMDumpRegs(pVM, pVCpu, pMixedCtx);
8653
8654 return VERR_VMX_INVALID_GUEST_STATE;
8655}
8656
8657
8658/**
8659 * VM-exit handler for VM-entry failure due to an MSR-load
8660 * (VMX_EXIT_ERR_MSR_LOAD). Error VM-exit.
8661 */
8662HMVMX_EXIT_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8663{
8664 AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
8665 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8666}
8667
8668
8669/**
8670 * VM-exit handler for VM-entry failure due to a machine-check event
8671 * (VMX_EXIT_ERR_MACHINE_CHECK). Error VM-exit.
8672 */
8673HMVMX_EXIT_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8674{
8675 AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
8676 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8677}
8678
8679
8680/**
8681 * VM-exit handler for all undefined reasons. Should never ever happen... in
8682 * theory.
8683 */
8684HMVMX_EXIT_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8685{
8686 AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason, pVCpu, pMixedCtx));
8687 return VERR_VMX_UNDEFINED_EXIT_CODE;
8688}
8689
8690
8691/**
8692 * VM-exit handler for XDTR (LGDT, SGDT, LIDT, SIDT) accesses
8693 * (VMX_EXIT_XDTR_ACCESS) and LDT and TR access (LLDT, LTR, SLDT, STR).
8694 * Conditional VM-exit.
8695 */
8696HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8697{
8698 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8699
8700 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT. */
8701 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess);
8702 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT)
8703 return VERR_EM_INTERPRETER;
8704 AssertMsgFailed(("Unexpected XDTR access. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
8705 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8706}
8707
8708
8709/**
8710 * VM-exit handler for RDRAND (VMX_EXIT_RDRAND). Conditional VM-exit.
8711 */
8712HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8713{
8714 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8715
8716 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT. */
8717 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdrand);
8718 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT)
8719 return VERR_EM_INTERPRETER;
8720 AssertMsgFailed(("Unexpected RDRAND exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
8721 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8722}
8723
8724
8725/**
8726 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
8727 */
8728HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8729{
8730 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8731
8732 /* EMInterpretRdmsr() requires CR0, Eflags and SS segment register. */
8733 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8734 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
8735 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8736 AssertRCReturn(rc, rc);
8737
8738 PVM pVM = pVCpu->CTX_SUFF(pVM);
8739 rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
8740 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER,
8741 ("hmR0VmxExitRdmsr: failed, invalid error code %Rrc\n", rc));
8742 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
8743
8744 if (RT_LIKELY(rc == VINF_SUCCESS))
8745 {
8746 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8747 Assert(pVmxTransient->cbInstr == 2);
8748 }
8749 return rc;
8750}
8751
8752
8753/**
8754 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
8755 */
8756HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8757{
8758 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8759 PVM pVM = pVCpu->CTX_SUFF(pVM);
8760 int rc = VINF_SUCCESS;
8761
8762 /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. */
8763 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8764 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
8765 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8766 AssertRCReturn(rc, rc);
8767 Log4(("ecx=%#RX32\n", pMixedCtx->ecx));
8768
8769 rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
8770 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0VmxExitWrmsr: failed, invalid error code %Rrc\n", rc));
8771 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
8772
8773 if (RT_LIKELY(rc == VINF_SUCCESS))
8774 {
8775 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8776
8777 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
8778 if ( pMixedCtx->ecx >= MSR_IA32_X2APIC_START
8779 && pMixedCtx->ecx <= MSR_IA32_X2APIC_END)
8780 {
8781 /* We've already saved the APIC related guest-state (TPR) in hmR0VmxPostRunGuest(). When full APIC register
8782  * virtualization is implemented we'll have to make sure APIC state is saved from the VMCS before
8783  * EMInterpretWrmsr() changes it. */
8784 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
8785 }
8786 else if (pMixedCtx->ecx == MSR_K6_EFER) /* EFER is the only MSR we auto-load but don't allow write-passthrough. */
8787 {
8788 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
8789 AssertRCReturn(rc, rc);
8790 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_AUTO_MSRS;
8791 }
8792 else if (pMixedCtx->ecx == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
8793 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
8794
8795 /* Update MSRs that are part of the VMCS when MSR-bitmaps are not supported. */
8796 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
8797 {
8798 switch (pMixedCtx->ecx)
8799 {
8800 case MSR_IA32_SYSENTER_CS: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_CS_MSR; break;
8801 case MSR_IA32_SYSENTER_EIP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_EIP_MSR; break;
8802 case MSR_IA32_SYSENTER_ESP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_ESP_MSR; break;
8803 case MSR_K8_FS_BASE: /* no break */
8804 case MSR_K8_GS_BASE: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS; break;
8805 case MSR_K8_KERNEL_GS_BASE: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_AUTO_MSRS; break;
8806 }
8807 }
8808#ifdef VBOX_STRICT
8809 else
8810 {
8811 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
8812 switch (pMixedCtx->ecx)
8813 {
8814 case MSR_IA32_SYSENTER_CS:
8815 case MSR_IA32_SYSENTER_EIP:
8816 case MSR_IA32_SYSENTER_ESP:
8817 case MSR_K8_FS_BASE:
8818 case MSR_K8_GS_BASE:
8819 {
8820 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
8821 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8822 }
8823
8824 case MSR_K8_LSTAR:
8825 case MSR_K6_STAR:
8826 case MSR_K8_SF_MASK:
8827 case MSR_K8_TSC_AUX:
8828 case MSR_K8_KERNEL_GS_BASE:
8829 {
8830 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
8831 pMixedCtx->ecx));
8832 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8833 }
8834 }
8835 }
8836#endif /* VBOX_STRICT */
8837 }
8838 return rc;
8839}
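
/*
 * Illustrative sketch (not part of the original file): the x2APIC range test used above when deciding
 * whether a WRMSR must dirty the virtual-APIC state. The literal 0x800..0x8ff bounds below are an
 * assumption standing in for the MSR_IA32_X2APIC_START/END constants used by the handler.
 */
#if 0
static bool vmxSketchIsX2ApicMsr(uint32_t idMsr)
{
    return idMsr >= UINT32_C(0x800)     /* First MSR of the x2APIC window (x2APIC ID). */
        && idMsr <= UINT32_C(0x8ff);    /* Last MSR of the window assumed here. */
}
#endif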
8840
8841
8842/**
8843 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8844 */
8845HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8846{
8847 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8848
8849 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT. */
8850 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);
8851 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT)
8852 return VERR_EM_INTERPRETER;
8853 AssertMsgFailed(("Unexpected PAUSE exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
8854 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8855}
8856
8857
8858/**
8859 * VM-exit handler for when the TPR value is lowered below the specified
8860 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8861 */
8862HMVMX_EXIT_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8863{
8864 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8865 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW);
8866
8867 /*
8868 * The TPR has already been updated, see hmR0VmxPostRunGuest(). RIP is also updated as part of the VM-exit by VT-x. Update
8869 * the threshold in the VMCS, deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectEvent() and
8870 * resume guest execution.
8871 */
8872 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
8873 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
8874 return VINF_SUCCESS;
8875}
8876
8877
8878/**
8879 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8880 * VM-exit.
8881 *
8882 * @retval VINF_SUCCESS when guest execution can continue.
8883 * @retval VINF_PGM_CHANGE_MODE when shadow paging mode changed, back to ring-3.
8884 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8885 * @retval VERR_EM_INTERPRETER when something unexpected happened, fallback to
8886 * recompiler.
8887 */
8888HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8889{
8890 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8891 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
8892 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8893 AssertRCReturn(rc, rc);
8894
8895 const RTGCUINTPTR uExitQualification = pVmxTransient->uExitQualification;
8896 const uint32_t uAccessType = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification);
8897 PVM pVM = pVCpu->CTX_SUFF(pVM);
8898 switch (uAccessType)
8899 {
8900 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE: /* MOV to CRx */
8901 {
8902#if 0
8903 /* EMInterpretCRxWrite() references a lot of guest state (EFER, RFLAGS, Segment Registers, etc.) Sync entire state */
8904 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8905#else
8906 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
8907 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
8908 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8909#endif
8910 AssertRCReturn(rc, rc);
8911
8912 rc = EMInterpretCRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
8913 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification),
8914 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification));
8915 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
8916
8917 switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification))
8918 {
8919 case 0: /* CR0 */
8920 Log4(("CRX CR0 write rc=%d CR0=%#RX64\n", rc, pMixedCtx->cr0));
8921 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
8922 break;
8923 case 2: /* CR2 */
8924 /* Nothing to do here; CR2 is not part of the VMCS. */
8925 break;
8926 case 3: /* CR3 */
8927 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx));
8928 Log4(("CRX CR3 write rc=%d CR3=%#RX64\n", rc, pMixedCtx->cr3));
8929 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;
8930 break;
8931 case 4: /* CR4 */
8932 Log4(("CRX CR4 write rc=%d CR4=%#RX64\n", rc, pMixedCtx->cr4));
8933 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4;
8934 break;
8935 case 8: /* CR8 */
8936 Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
8937 /* CR8 contains the APIC TPR. Was updated by EMInterpretCRxWrite(). */
8938 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
8939 break;
8940 default:
8941 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)));
8942 break;
8943 }
8944
8945 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
8946 break;
8947 }
8948
8949 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ: /* MOV from CRx */
8950 {
8951 /* EMInterpretCRxRead() requires EFER MSR, CS. */
8952 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8953 AssertRCReturn(rc, rc);
8954 Assert( !pVM->hm.s.fNestedPaging
8955 || !CPUMIsGuestPagingEnabledEx(pMixedCtx)
8956 || VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 3);
8957
8958 /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8959 Assert( VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 8
8960 || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
8961
8962 rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
8963 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification),
8964 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification));
8965 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8966 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
8967 Log4(("CRX CR%d Read access rc=%d\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification), rc));
8968 break;
8969 }
8970
8971 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */
8972 {
8973 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8974 AssertRCReturn(rc, rc);
8975 rc = EMInterpretCLTS(pVM, pVCpu);
8976 AssertRCReturn(rc, rc);
8977 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
8978 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
8979 Log4(("CRX CLTS write rc=%d\n", rc));
8980 break;
8981 }
8982
8983 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
8984 {
8985 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8986 AssertRCReturn(rc, rc);
8987 rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
8988 if (RT_LIKELY(rc == VINF_SUCCESS))
8989 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
8990 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
8991 Log4(("CRX LMSW write rc=%d\n", rc));
8992 break;
8993 }
8994
8995 default:
8996 {
8997 AssertMsgFailed(("Invalid access-type in Mov CRx exit qualification %#x\n", uAccessType));
8998 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
8999 }
9000 }
9001
9002 /* Validate possible error codes. */
9003 Assert(rc == VINF_SUCCESS || rc == VINF_PGM_CHANGE_MODE || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_SYNC_CR3
9004 || rc == VERR_VMX_UNEXPECTED_EXCEPTION);
9005 if (RT_SUCCESS(rc))
9006 {
9007 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
9008 AssertRCReturn(rc2, rc2);
9009 }
9010
9011 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
9012 return rc;
9013}
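
/*
 * Illustrative sketch (not part of the original file): how the Mov CRx exit qualification decodes into
 * the pieces used by the handler above. Field positions follow the Intel SDM table for control-register
 * accesses; the struct and function names are made up for illustration, the real code uses the
 * VMX_EXIT_QUALIFICATION_CRX_* macros.
 */
#if 0
typedef struct VMXSKETCHCRXQUAL
{
    uint8_t  iCrReg;        /* Bits 3:0   - control register number (0, 2, 3, 4 or 8). */
    uint8_t  uAccessType;   /* Bits 5:4   - 0=MOV to CR, 1=MOV from CR, 2=CLTS, 3=LMSW. */
    uint8_t  iGReg;         /* Bits 11:8  - general-purpose register operand for MOV CRx. */
    uint16_t uLmswSrc;      /* Bits 31:16 - LMSW source data (LMSW only). */
} VMXSKETCHCRXQUAL;

static VMXSKETCHCRXQUAL vmxSketchDecodeCrxQual(uint64_t uQual)
{
    VMXSKETCHCRXQUAL Decoded;
    Decoded.iCrReg      = (uint8_t)(  uQual        & 0xf);
    Decoded.uAccessType = (uint8_t)( (uQual >>  4) & 0x3);
    Decoded.iGReg       = (uint8_t)( (uQual >>  8) & 0xf);
    Decoded.uLmswSrc    = (uint16_t)((uQual >> 16) & 0xffff);
    return Decoded;
}
#endif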
9014
9015
9016/**
9017 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
9018 * VM-exit.
9019 */
9020HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9021{
9022 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9023 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
9024
9025 int rc2 = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9026 rc2 |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9027 rc2 |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
9028 rc2 |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* Eflag checks in EMInterpretDisasCurrent(). */
9029 rc2 |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
9030 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* SELM checks in EMInterpretDisasCurrent(). */
9031 /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
9032 AssertRCReturn(rc2, rc2);
9033
9034 /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
9035 uint32_t uIOPort = VMX_EXIT_QUALIFICATION_IO_PORT(pVmxTransient->uExitQualification);
9036 uint8_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(pVmxTransient->uExitQualification);
9037 bool fIOWrite = ( VMX_EXIT_QUALIFICATION_IO_DIRECTION(pVmxTransient->uExitQualification)
9038 == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT);
9039 bool fIOString = VMX_EXIT_QUALIFICATION_IO_IS_STRING(pVmxTransient->uExitQualification);
9040 AssertReturn(uIOWidth <= 3 && uIOWidth != 2, VERR_HMVMX_IPE_1);
9041
9042 /* I/O operation lookup arrays. */
9043 static const uint32_t s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses. */
9044 static const uint32_t s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving the result (in AL/AX/EAX). */
9045
9046 VBOXSTRICTRC rcStrict;
9047 const uint32_t cbValue = s_aIOSizes[uIOWidth];
9048 const uint32_t cbInstr = pVmxTransient->cbInstr;
9049 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
9050 PVM pVM = pVCpu->CTX_SUFF(pVM);
9051 if (fIOString)
9052 {
9053 /*
9054 * INS/OUTS - I/O String instruction.
9055 *
9056 * Use instruction-information if available, otherwise fall back on
9057 * interpreting the instruction.
9058 */
9059 Log4(("CS:RIP=%04x:%#RX64 %#06x/%u %c str\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
9060#if 0 /* Not quite ready; the iSegReg assertion seems to trigger once... Do we perhaps need to always read that in the longjmp / preempt scenario? */
9061 AssertReturn(pMixedCtx->dx == uIOPort, VERR_HMVMX_IPE_2);
9062 if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.msr.vmx_basic_info))
9063 {
9064 rc2 = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
9065 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
9066 rc2 |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
9067 AssertRCReturn(rc2, rc2);
9068 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_HMVMX_IPE_3);
9069 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
9070 IEMMODE enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
9071 bool fRep = VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification);
9072 if (fIOWrite)
9073 {
9074 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
9075 pVmxTransient->ExitInstrInfo.StrIo.iSegReg);
9076 //if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
9077 // hmR0SavePendingIOPortWriteStr(pVCpu, pMixedCtx->rip, cbValue, enmAddrMode, fRep, cbInstr,
9078 // pVmxTransient->ExitInstrInfo.StrIo.iSegReg);
9079 }
9080 else
9081 {
9082 AssertMsgReturn(pVmxTransient->ExitInstrInfo.StrIo.iSegReg == X86_SREG_ES,
9083 ("%#x (%#llx)\n", pVmxTransient->ExitInstrInfo.StrIo.iSegReg, pVmxTransient->ExitInstrInfo.u),
9084 VERR_HMVMX_IPE_4);
9085 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr);
9086 //if (rcStrict == VINF_IOM_R3_IOPORT_READ)
9087 // hmR0SavePendingIOPortReadStr(pVCpu, pMixedCtx->rip, cbValue, enmAddrMode, fRep, cbInstr);
9088 }
9089 }
9090 else
9091 {
9092 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
9093 rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
9094 AssertRCReturn(rc2, rc2);
9095 rcStrict = IEMExecOne(pVCpu);
9096 }
9097 /** @todo IEM needs to be setting these flags somehow. */
9098 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
9099 fUpdateRipAlready = true;
9100
9101#else
9102 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
9103 rcStrict = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
9104 if (RT_SUCCESS(rcStrict))
9105 {
9106 if (fIOWrite)
9107 {
9108 rcStrict = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
9109 (DISCPUMODE)pDis->uAddrMode, cbValue);
9110 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
9111 }
9112 else
9113 {
9114 rcStrict = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
9115 (DISCPUMODE)pDis->uAddrMode, cbValue);
9116 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
9117 }
9118 }
9119 else
9120 {
9121 AssertMsg(rcStrict == VERR_EM_INTERPRETER, ("rcStrict=%Rrc RIP %#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->rip));
9122 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
9123 }
9124#endif
9125 }
9126 else
9127 {
9128 /*
9129 * IN/OUT - I/O instruction.
9130 */
9131 Log4(("CS:RIP=%04x:%#RX64 %#06x/%u %c\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
9132 const uint32_t uAndVal = s_aIOOpAnd[uIOWidth];
9133 Assert(!VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification));
9134 if (fIOWrite)
9135 {
9136 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbValue);
9137 if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
9138 HMR0SavePendingIOPortWrite(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
9139 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
9140 }
9141 else
9142 {
9143 uint32_t u32Result = 0;
9144 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
9145 if (IOM_SUCCESS(rcStrict))
9146 {
9147 /* Save result of I/O IN instr. in AL/AX/EAX. */
9148 pMixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);
9149 }
9150 else if (rcStrict == VINF_IOM_R3_IOPORT_READ)
9151 HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
9152 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
9153 }
9154 }
9155
9156 if (IOM_SUCCESS(rcStrict))
9157 {
9158 if (!fUpdateRipAlready)
9159 {
9160 pMixedCtx->rip += cbInstr;
9161 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
9162 }
9163
9164 /*
9165 * If any I/O breakpoints are armed, we need to check if one triggered
9166 * and take appropriate action.
9167 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
9168 */
9169 rc2 = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
9170 AssertRCReturn(rc2, rc2);
9171
9172 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
9173 * execution engines about whether hyper BPs and such are pending. */
9174 uint32_t const uDr7 = pMixedCtx->dr[7];
9175 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
9176 && X86_DR7_ANY_RW_IO(uDr7)
9177 && (pMixedCtx->cr4 & X86_CR4_DE))
9178 || DBGFBpIsHwIoArmed(pVM)))
9179 {
9180 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
9181 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /*fDr6*/);
9182
9183 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pMixedCtx, uIOPort, cbValue);
9184 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
9185 {
9186 /* Raise #DB. */
9187 if (fIsGuestDbgActive)
9188 ASMSetDR6(pMixedCtx->dr[6]);
9189 if (pMixedCtx->dr[7] != uDr7)
9190 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
9191
9192 hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
9193 }
9194 /* rcStrict is VINF_SUCCESS or in [VINF_EM_FIRST..VINF_EM_LAST]. */
9195 else if ( rcStrict2 != VINF_SUCCESS
9196 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
9197 rcStrict = rcStrict2;
9198 }
9199 }
9200
9201#ifdef DEBUG
9202 if (rcStrict == VINF_IOM_R3_IOPORT_READ)
9203 Assert(!fIOWrite);
9204 else if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
9205 Assert(fIOWrite);
9206 else
9207 {
9208 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
9209 * statuses, that the VMM device and some others may return. See
9210 * IOM_SUCCESS() for guidance. */
9211 AssertMsg( RT_FAILURE(rcStrict)
9212 || rcStrict == VINF_SUCCESS
9213 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
9214 || rcStrict == VINF_EM_DBG_BREAKPOINT
9215 || rcStrict == VINF_EM_RAW_GUEST_TRAP
9216 || rcStrict == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9217 }
9218#endif
9219
9220 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
9221 return VBOXSTRICTRC_TODO(rcStrict);
9222}
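
/*
 * Illustrative sketch (not part of the original file): what the s_aIOSizes/s_aIOOpAnd lookup tables in the
 * handler above encode. The I/O exit qualification width field is 0, 1 or 3 for 1-, 2- and 4-byte accesses
 * (2 is unused), and only that many low bits of EAX may be touched when storing the result of an IN.
 * The function name is made up for illustration.
 */
#if 0
static uint32_t vmxSketchMergeIoResult(uint32_t uEax, uint32_t u32Result, uint8_t uIOWidth)
{
    static const uint32_t s_aMasks[4] = { 0xff, 0xffff, 0, 0xffffffff };
    uint32_t const uAndVal = s_aMasks[uIOWidth & 3];
    /* E.g. "in al, dx" (uIOWidth=0) returning 0x5a: only AL changes, the upper 24 bits of EAX survive. */
    return (uEax & ~uAndVal) | (u32Result & uAndVal);
}
#endif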
9223
9224
9225/**
9226 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
9227 * VM-exit.
9228 */
9229HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9230{
9231 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9232
9233 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
9234 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9235 AssertRCReturn(rc, rc);
9236 if (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
9237 {
9238 rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
9239 AssertRCReturn(rc, rc);
9240 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
9241 {
9242 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
9243
9244 /* Software interrupts and exceptions will be regenerated when the recompiler restarts the instruction. */
9245 if ( uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
9246 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
9247 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
9248 {
9249 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
9250 bool fErrorCodeValid = !!VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo);
9251
9252 /* Save it as a pending event and it'll be converted to a TRPM event on the way out to ring-3. */
9253 Assert(!pVCpu->hm.s.Event.fPending);
9254 pVCpu->hm.s.Event.fPending = true;
9255 pVCpu->hm.s.Event.u64IntrInfo = pVmxTransient->uIdtVectoringInfo;
9256 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
9257 AssertRCReturn(rc, rc);
9258 if (fErrorCodeValid)
9259 pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
9260 else
9261 pVCpu->hm.s.Event.u32ErrCode = 0;
9262 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
9263 && uVector == X86_XCPT_PF)
9264 {
9265 pVCpu->hm.s.Event.GCPtrFaultAddress = pMixedCtx->cr2;
9266 }
9267
9268 Log4(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector));
9269 }
9270 }
9271 }
9272
9273 /** @todo Emulate task switch someday, currently just going back to ring-3 for
9274 * emulation. */
9275 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
9276 return VERR_EM_INTERPRETER;
9277}
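
/*
 * Illustrative sketch (not part of the original file): the layout of the IDT-vectoring information field
 * consumed above when re-queueing an event that was being delivered when the task switch caused the
 * VM-exit. Field positions follow the Intel SDM; the real code uses the VMX_IDT_VECTORING_INFO_* macros
 * and the function name here is made up for illustration.
 */
#if 0
static void vmxSketchDecodeIdtVectoringInfo(uint32_t uInfo, uint8_t *puVector, uint8_t *puType,
                                            bool *pfErrCodeValid, bool *pfValid)
{
    *puVector       = (uint8_t)( uInfo       & 0xff);   /* Bits 7:0  - vector of the interrupt/exception. */
    *puType         = (uint8_t)((uInfo >> 8) & 0x7);    /* Bits 10:8 - 0=ext int, 2=NMI, 3=HW xcpt, 4=SW int, ... */
    *pfErrCodeValid = ((uInfo >> 11) & 1) != 0;         /* Bit 11    - an error code was being pushed. */
    *pfValid        = ((uInfo >> 31) & 1) != 0;         /* Bit 31    - the whole field is valid. */
}
#endif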
9278
9279
9280/**
9281 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
9282 */
9283HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9284{
9285 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9286 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG);
9287 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
9288 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
9289 AssertRCReturn(rc, rc);
9290 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
9291 return VINF_EM_DBG_STEPPED;
9292}
9293
9294
9295/**
9296 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
9297 */
9298HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9299{
9300 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9301
9302 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
9303 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
9304 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
9305 return VINF_SUCCESS;
9306 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
9307 return rc;
9308
9309#if 0
9310 /** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now
9311 * just sync the whole thing. */
9312 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
9313#else
9314 /* Aggressive state sync. for now. */
9315 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
9316 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
9317 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
9318#endif
9319 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9320 AssertRCReturn(rc, rc);
9321
9322 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
9323 uint32_t uAccessType = VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
9324 switch (uAccessType)
9325 {
9326 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
9327 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
9328 {
9329 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
9330 && VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) == 0x80)
9331 {
9332 AssertMsgFailed(("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
9333 }
9334
9335 RTGCPHYS GCPhys = pMixedCtx->msrApicBase; /* Always up-to-date, msrApicBase is not part of the VMCS. */
9336 GCPhys &= PAGE_BASE_GC_MASK;
9337 GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
9338 PVM pVM = pVCpu->CTX_SUFF(pVM);
9339 Log4(("ApicAccess uAccessType=%#x GCPhys=%#RGv Off=%#x\n", uAccessType, GCPhys,
9340 VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
9341
9342 VBOXSTRICTRC rc2 = IOMMMIOPhysHandler(pVM, pVCpu,
9343 (uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ) ? 0 : X86_TRAP_PF_RW,
9344 CPUMCTX2CORE(pMixedCtx), GCPhys);
9345 rc = VBOXSTRICTRC_VAL(rc2);
9346 Log4(("ApicAccess rc=%d\n", rc));
9347 if ( rc == VINF_SUCCESS
9348 || rc == VERR_PAGE_TABLE_NOT_PRESENT
9349 || rc == VERR_PAGE_NOT_PRESENT)
9350 {
9351 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9352 | HM_CHANGED_VMX_GUEST_APIC_STATE;
9353 rc = VINF_SUCCESS;
9354 }
9355 break;
9356 }
9357
9358 default:
9359 Log4(("ApicAccess uAccessType=%#x\n", uAccessType));
9360 rc = VINF_EM_RAW_EMULATE_INSTR;
9361 break;
9362 }
9363
9364 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
9365 return rc;
9366}
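
/*
 * Illustrative sketch (not part of the original file): the APIC-access exit qualification fields used in
 * the handler above. For the linear-access types the low 12 bits give the offset into the 4K APIC page
 * (0x80 is the TPR register, which should never fault here while the TPR shadow is active) and bits 15:12
 * give the access type. Field positions follow the Intel SDM; the real code uses the
 * VMX_EXIT_QUALIFICATION_APIC_ACCESS_* macros and the function name is made up for illustration.
 */
#if 0
static void vmxSketchDecodeApicAccessQual(uint64_t uQual, uint16_t *puOffset, uint8_t *puAccessType)
{
    *puOffset     = (uint16_t)( uQual        & 0xfff);  /* Bits 11:0  - offset into the APIC-access page. */
    *puAccessType = (uint8_t) ((uQual >> 12) & 0xf);    /* Bits 15:12 - 0=linear read, 1=linear write, ... */
}
#endif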
9367
9368
9369/**
9370 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
9371 * VM-exit.
9372 */
9373HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9374{
9375 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9376
9377 /* We should -not- get this VM-exit if the guest is debugging. */
9378 if (CPUMIsGuestDebugStateActive(pVCpu))
9379 {
9380 AssertMsgFailed(("Unexpected MOV DRx exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
9381 return VERR_VMX_UNEXPECTED_EXIT_CODE;
9382 }
9383
9384 int rc = VERR_INTERNAL_ERROR_5;
9385 if ( !DBGFIsStepping(pVCpu)
9386 && !pVCpu->hm.s.fSingleInstruction
9387 && !CPUMIsHyperDebugStateActive(pVCpu))
9388 {
9389 /* Don't intercept MOV DRx and #DB any more. */
9390 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
9391 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
9392 AssertRCReturn(rc, rc);
9393
9394 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
9395 {
9396#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
9397 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
9398 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
9399 AssertRCReturn(rc, rc);
9400#endif
9401 }
9402
9403 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
9404 PVM pVM = pVCpu->CTX_SUFF(pVM);
9405 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
9406 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9407
9408#ifdef VBOX_WITH_STATISTICS
9409 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9410 AssertRCReturn(rc, rc);
9411 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
9412 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
9413 else
9414 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
9415#endif
9416 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
9417 return VINF_SUCCESS;
9418 }
9419
9420 /*
9421 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER, CS. EFER is always up-to-date, see
9422 * hmR0VmxSaveGuestAutoLoadStoreMsrs(). Update only the segment registers from the CPU.
9423 */
9424 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9425 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
9426 AssertRCReturn(rc, rc);
9427
9428 PVM pVM = pVCpu->CTX_SUFF(pVM);
9429 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
9430 {
9431 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
9432 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification),
9433 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification));
9434 if (RT_SUCCESS(rc))
9435 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
9436 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
9437 }
9438 else
9439 {
9440 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
9441 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification),
9442 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification));
9443 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
9444 }
9445
9446 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
9447 if (RT_SUCCESS(rc))
9448 {
9449 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
9450 AssertRCReturn(rc2, rc2);
9451 }
9452 return rc;
9453}
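
/*
 * Illustrative sketch (not part of the original file): the Mov DRx exit qualification fields used above.
 * Field positions follow the Intel SDM; the real code uses the VMX_EXIT_QUALIFICATION_DRX_* macros and
 * the function name here is made up for illustration.
 */
#if 0
static void vmxSketchDecodeDrxQual(uint64_t uQual, uint8_t *piDrReg, bool *pfRead, uint8_t *piGReg)
{
    *piDrReg = (uint8_t)( uQual       & 0x7);   /* Bits 2:0  - debug register number. */
    *pfRead  = ((uQual >> 4) & 1) != 0;         /* Bit 4     - 0 = MOV to DRx (write), 1 = MOV from DRx (read). */
    *piGReg  = (uint8_t)((uQual >> 8) & 0xf);   /* Bits 11:8 - general-purpose register operand. */
}
#endif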
9454
9455
9456/**
9457 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
9458 * Conditional VM-exit.
9459 */
9460HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9461{
9462 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9463 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
9464
9465 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
9466 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
9467 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
9468 return VINF_SUCCESS;
9469 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
9470 return rc;
9471
9472 RTGCPHYS GCPhys = 0;
9473 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
9474
9475#if 0
9476 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
9477#else
9478 /* Aggressive state sync. for now. */
9479 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
9480 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
9481 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
9482#endif
9483 AssertRCReturn(rc, rc);
9484
9485 /*
9486 * If we succeed, resume guest execution.
9487 * If we fail to interpret the instruction because we couldn't get the guest physical address
9488 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
9489 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
9490 * weird case. See @bugref{6043}.
9491 */
9492 PVM pVM = pVCpu->CTX_SUFF(pVM);
9493 VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
9494 rc = VBOXSTRICTRC_VAL(rc2);
9495 Log4(("EPT misconfig at %#RGv RIP=%#RX64 rc=%d\n", GCPhys, pMixedCtx->rip, rc));
9496 if ( rc == VINF_SUCCESS
9497 || rc == VERR_PAGE_TABLE_NOT_PRESENT
9498 || rc == VERR_PAGE_NOT_PRESENT)
9499 {
9500 /* Successfully handled MMIO operation. */
9501 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9502 | HM_CHANGED_VMX_GUEST_APIC_STATE;
9503 rc = VINF_SUCCESS;
9504 }
9505 return rc;
9506}
9507
9508
9509/**
9510 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
9511 * VM-exit.
9512 */
9513HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9514{
9515 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9516 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
9517
9518 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
9519 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
9520 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
9521 return VINF_SUCCESS;
9522 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
9523 return rc;
9524
9525 RTGCPHYS GCPhys = 0;
9526 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
9527 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9528#if 0
9529 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
9530#else
9531 /* Aggressive state sync. for now. */
9532 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
9533 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
9534 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
9535#endif
9536 AssertRCReturn(rc, rc);
9537
9538 /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
9539 AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQualification));
9540
9541 RTGCUINT uErrorCode = 0;
9542 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH)
9543 uErrorCode |= X86_TRAP_PF_ID;
9544 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE)
9545 uErrorCode |= X86_TRAP_PF_RW;
9546 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
9547 uErrorCode |= X86_TRAP_PF_P;
9548
9549 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
9550
9551 Log4(("EPT violation %#x at %#RX64 ErrorCode %#x CS:EIP=%04x:%#RX64\n", pVmxTransient->uExitQualification, GCPhys,
9552 uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
9553
9554 /* Handle the pagefault trap for the nested shadow table. */
9555 PVM pVM = pVCpu->CTX_SUFF(pVM);
9556 rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pMixedCtx), GCPhys);
9557 TRPMResetTrap(pVCpu);
9558
9559 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
9560 if ( rc == VINF_SUCCESS
9561 || rc == VERR_PAGE_TABLE_NOT_PRESENT
9562 || rc == VERR_PAGE_NOT_PRESENT)
9563 {
9564 /* Successfully synced our nested page tables. */
9565 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
9566 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS;
9567 return VINF_SUCCESS;
9568 }
9569
9570 Log4(("EPT return to ring-3 rc=%d\n"));
9571 return rc;
9572}
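
/*
 * Illustrative sketch (not part of the original file): the #PF-style error code synthesized above from
 * the EPT-violation exit qualification before handing the fault to PGM. E.g. a guest write to a page
 * whose EPT entry is present but read-only ends up with X86_TRAP_PF_RW | X86_TRAP_PF_P (0x3). The helper
 * name is made up; the real code tests the VMX_EXIT_QUALIFICATION_EPT_* bits directly.
 */
#if 0
static uint32_t vmxSketchEptViolationToPfErrCode(bool fInstrFetch, bool fWrite, bool fEntryPresent)
{
    uint32_t uErrCode = 0;
    if (fInstrFetch)   uErrCode |= X86_TRAP_PF_ID;  /* The violation happened while fetching an instruction. */
    if (fWrite)        uErrCode |= X86_TRAP_PF_RW;  /* The violation happened on a data write. */
    if (fEntryPresent) uErrCode |= X86_TRAP_PF_P;   /* The translation existed; permissions failed. */
    return uErrCode;
}
#endif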
9573
9574/** @} */
9575
9576/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9577/* -=-=-=-=-=-=-=-=-=- VM-exit Exception Handlers -=-=-=-=-=-=-=-=-=-=- */
9578/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9579
9580/** @name VM-exit exception handlers.
9581 * @{
9582 */
9583
9584/**
9585 * VM-exit exception handler for #MF (Math Fault: floating point exception).
9586 */
9587static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9588{
9589 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
9590 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
9591
9592 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
9593 AssertRCReturn(rc, rc);
9594
9595 if (!(pMixedCtx->cr0 & X86_CR0_NE))
9596 {
9597 /* Old-style FPU error reporting needs some extra work. */
9598 /** @todo don't fall back to the recompiler, but do it manually. */
9599 return VERR_EM_INTERPRETER;
9600 }
9601
9602 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
9603 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
9604 return rc;
9605}
9606
9607
9608/**
9609 * VM-exit exception handler for #BP (Breakpoint exception).
9610 */
9611static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9612{
9613 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
9614 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
9615
9616 /** @todo Try optimize this by not saving the entire guest state unless
9617 * really needed. */
9618 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
9619 AssertRCReturn(rc, rc);
9620
9621 PVM pVM = pVCpu->CTX_SUFF(pVM);
9622 rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
9623 if (rc == VINF_EM_RAW_GUEST_TRAP)
9624 {
9625 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
9626 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9627 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
9628 AssertRCReturn(rc, rc);
9629
9630 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
9631 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
9632 }
9633
9634 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
9635 return rc;
9636}
9637
9638
9639/**
9640 * VM-exit exception handler for #DB (Debug exception).
9641 */
9642static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9643{
9644 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
9645 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
9646 Log6(("XcptDB\n"));
9647
9648 /*
9649 * Get the DR6-like values from the exit qualification and pass them to DBGF
9650 * for processing.
9651 */
9652 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9653 AssertRCReturn(rc, rc);
9654
9655 /* If we set the trap flag above, we have to clear it. */ /** @todo HM should remember what it does and possibly do this elsewhere! */
9656 if ( (pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu))
9657 && !(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG))
9658 pMixedCtx->eflags.Bits.u1TF = 0;
9659
9660 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
9661 uint64_t uDR6 = X86_DR6_INIT_VAL;
9662 uDR6 |= ( pVmxTransient->uExitQualification
9663 & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
9664
9665 rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6, pVCpu->hm.s.fSingleInstruction);
9666 if (rc == VINF_EM_RAW_GUEST_TRAP)
9667 {
9668 /*
9669 * The exception was for the guest. Update DR6, DR7.GD and
9670 * IA32_DEBUGCTL.LBR before forwarding it.
9671 * (See Intel spec. 27.1 "Architectural State before a VM-Exit".)
9672 */
9673 pMixedCtx->dr[6] &= ~X86_DR6_B_MASK;
9674 pMixedCtx->dr[6] |= uDR6;
9675 if (CPUMIsGuestDebugStateActive(pVCpu))
9676 ASMSetDR6(pMixedCtx->dr[6]);
9677
9678 rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
9679 AssertRCReturn(rc, rc);
9680
9681 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
9682 pMixedCtx->dr[7] &= ~X86_DR7_GD;
9683
9684 /* Paranoia. */
9685 pMixedCtx->dr[7] &= ~X86_DR7_RAZ_MASK;
9686 pMixedCtx->dr[7] |= X86_DR7_RA1_MASK;
9687
9688 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pMixedCtx->dr[7]);
9689 AssertRCReturn(rc, rc);
9690
9691 /*
9692 * Raise #DB in the guest.
9693 */
9694 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
9695 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9696 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
9697 AssertRCReturn(rc, rc);
9698 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
9699 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
9700 return VINF_SUCCESS;
9701 }
9702
9703 /*
9704 * Not a guest trap, must be a hypervisor related debug event then.
9705 * Update DR6 in case someone is interested in it.
9706 */
9707 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
9708 AssertReturn(CPUMIsHyperDebugStateActive(pVCpu), VERR_HM_IPE_5);
9709 CPUMSetHyperDR6(pVCpu, uDR6);
9710
9711 return rc;
9712}
9713
9714
9715/**
9716 * VM-exit exception handler for #NM (Device-not-available exception: floating
9717 * point exception).
9718 */
9719static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9720{
9721 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
9722
9723#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
9724 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
9725#endif
9726
9727 /* We require CR0 and EFER. EFER is always up-to-date. */
9728 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
9729 AssertRCReturn(rc, rc);
9730
9731 /* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
9732 PVM pVM = pVCpu->CTX_SUFF(pVM);
9733 rc = CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
9734 if (rc == VINF_SUCCESS)
9735 {
9736 Assert(CPUMIsGuestFPUStateActive(pVCpu));
9737 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
9738 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
9739 return VINF_SUCCESS;
9740 }
9741
9742 /* Forward #NM to the guest. */
9743 Assert(rc == VINF_EM_RAW_GUEST_TRAP);
9744 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
9745 AssertRCReturn(rc, rc);
9746 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
9747 pVmxTransient->cbInstr, 0 /* error code */, 0 /* GCPtrFaultAddress */);
9748 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
9749 return rc;
9750}
9751
9752
9753/**
9754 * VM-exit exception handler for #GP (General-protection exception).
9755 *
9756 * @remarks Requires pVmxTransient->uExitIntrInfo to be up-to-date.
9757 */
9758static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9759{
9760 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
9761 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
9762
9763 int rc = VERR_INTERNAL_ERROR_5;
9764 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
9765 {
9766#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
9767 /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
9768 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
9769 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
9770 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9771 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
9772 AssertRCReturn(rc, rc);
9773 Log4(("#GP Gst: RIP %#RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u\n", pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode,
9774 pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu)));
9775 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
9776 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
9777 return rc;
9778#else
9779 /* We don't intercept #GP. */
9780 AssertMsgFailed(("Unexpected VM-exit caused by #GP exception\n"));
9781 return VERR_VMX_UNEXPECTED_EXCEPTION;
9782#endif
9783 }
9784
9785 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
9786 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
9787
9788 /* EMInterpretDisasCurrent() requires a lot of the state, save the entire state. */
9789 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
9790 AssertRCReturn(rc, rc);
9791
9792 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
9793 uint32_t cbOp = 0;
9794 PVM pVM = pVCpu->CTX_SUFF(pVM);
9795 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
9796 if (RT_SUCCESS(rc))
9797 {
9798 rc = VINF_SUCCESS;
9799 Assert(cbOp == pDis->cbInstr);
9800 Log4(("#GP Disas OpCode=%u CS:EIP %04x:%#RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
9801 switch (pDis->pCurInstr->uOpcode)
9802 {
9803 case OP_CLI:
9804 {
9805 pMixedCtx->eflags.Bits.u1IF = 0;
9806 pMixedCtx->rip += pDis->cbInstr;
9807 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
9808 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
9809 break;
9810 }
9811
9812 case OP_STI:
9813 {
9814 pMixedCtx->eflags.Bits.u1IF = 1;
9815 pMixedCtx->rip += pDis->cbInstr;
9816 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
9817 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
9818 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
9819 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
9820 break;
9821 }
9822
9823 case OP_HLT:
9824 {
9825 rc = VINF_EM_HALT;
9826 pMixedCtx->rip += pDis->cbInstr;
9827 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
9828 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
9829 break;
9830 }
9831
9832 case OP_POPF:
9833 {
9834 Log4(("POPF CS:RIP %04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
9835 uint32_t cbParm = 0;
9836 uint32_t uMask = 0;
9837 if (pDis->fPrefix & DISPREFIX_OPSIZE)
9838 {
9839 cbParm = 4;
9840 uMask = 0xffffffff;
9841 }
9842 else
9843 {
9844 cbParm = 2;
9845 uMask = 0xffff;
9846 }
9847
9848 /* Get the stack pointer & pop the contents of the stack onto EFlags. */
9849 RTGCPTR GCPtrStack = 0;
9850 X86EFLAGS uEflags;
9851 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
9852 &GCPtrStack);
9853 if (RT_SUCCESS(rc))
9854 {
9855 Assert(sizeof(uEflags.u32) >= cbParm);
9856 uEflags.u32 = 0;
9857 rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &uEflags.u32, cbParm);
9858 }
9859 if (RT_FAILURE(rc))
9860 {
9861 rc = VERR_EM_INTERPRETER;
9862 break;
9863 }
9864 Log4(("POPF %x -> %#RX64 mask=%x RIP=%#RX64\n", uEflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
9865 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
9866 | (uEflags.u32 & X86_EFL_POPF_BITS & uMask);
9867 /* The RF bit is always cleared by POPF; see Intel Instruction reference for POPF. */
9868 pMixedCtx->eflags.Bits.u1RF = 0;
9869 pMixedCtx->esp += cbParm;
9870 pMixedCtx->esp &= uMask;
9871 pMixedCtx->rip += pDis->cbInstr;
9872 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS;
9873 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
9874 break;
9875 }
9876
9877 case OP_PUSHF:
9878 {
9879 uint32_t cbParm = 0;
9880 uint32_t uMask = 0;
9881 if (pDis->fPrefix & DISPREFIX_OPSIZE)
9882 {
9883 cbParm = 4;
9884 uMask = 0xffffffff;
9885 }
9886 else
9887 {
9888 cbParm = 2;
9889 uMask = 0xffff;
9890 }
9891
9892 /* Get the stack pointer & push the contents of eflags onto the stack. */
9893 RTGCPTR GCPtrStack = 0;
9894 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), (pMixedCtx->esp - cbParm) & uMask,
9895 SELMTOFLAT_FLAGS_CPL0, &GCPtrStack);
9896 if (RT_FAILURE(rc))
9897 {
9898 rc = VERR_EM_INTERPRETER;
9899 break;
9900 }
9901 X86EFLAGS uEflags;
9902 uEflags = pMixedCtx->eflags;
9903 /* The RF and VM bits are cleared in the EFLAGS image stored on the stack; see the Intel instruction reference for PUSHF. */
9904 uEflags.Bits.u1RF = 0;
9905 uEflags.Bits.u1VM = 0;
9906
9907 rc = PGMPhysWrite(pVM, (RTGCPHYS)GCPtrStack, &uEflags.u, cbParm);
9908 if (RT_FAILURE(rc))
9909 {
9910 rc = VERR_EM_INTERPRETER;
9911 break;
9912 }
9913 Log4(("PUSHF %x -> %#RGv\n", uEflags.u, GCPtrStack));
9914 pMixedCtx->esp -= cbParm;
9915 pMixedCtx->esp &= uMask;
9916 pMixedCtx->rip += pDis->cbInstr;
9917 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP;
9918 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
9919 break;
9920 }
9921
9922 case OP_IRET:
9923 {
9924 /** @todo Handle 32-bit operand sizes and check stack limits. See Intel
9925 * instruction reference. */
9926 RTGCPTR GCPtrStack = 0;
9927 uint32_t uMask = 0xffff;
9928 uint16_t aIretFrame[3];
9929 if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE))
9930 {
9931 rc = VERR_EM_INTERPRETER;
9932 break;
9933 }
9934 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
9935 &GCPtrStack);
9936 if (RT_SUCCESS(rc))
9937 rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &aIretFrame[0], sizeof(aIretFrame));
9938 if (RT_FAILURE(rc))
9939 {
9940 rc = VERR_EM_INTERPRETER;
9941 break;
9942 }
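/* Real-mode IRET frame popped off the stack: word 0 = IP, word 1 = CS, word 2 = FLAGS. */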
9943 pMixedCtx->eip = 0;
9944 pMixedCtx->ip = aIretFrame[0];
9945 pMixedCtx->cs.Sel = aIretFrame[1];
9946 pMixedCtx->cs.ValidSel = aIretFrame[1];
9947 pMixedCtx->cs.u64Base = (uint64_t)pMixedCtx->cs.Sel << 4;
9948 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
9949 | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
9950 pMixedCtx->sp += sizeof(aIretFrame);
9951 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_SEGMENT_REGS | HM_CHANGED_GUEST_RSP
9952 | HM_CHANGED_GUEST_RFLAGS;
9953 Log4(("IRET %#RX32 to %04x:%x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
9954 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
9955 break;
9956 }
9957
9958 case OP_INT:
9959 {
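/* Queue INT n as a pending software-interrupt event; it will be injected into the guest on the next VM-entry. */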
9960 uint16_t uVector = pDis->Param1.uValue & 0xff;
9961 hmR0VmxSetPendingIntN(pVCpu, pMixedCtx, uVector, pDis->cbInstr);
9962 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
9963 break;
9964 }
9965
9966 case OP_INTO:
9967 {
9968 if (pMixedCtx->eflags.Bits.u1OF)
9969 {
9970 hmR0VmxSetPendingXcptOF(pVCpu, pMixedCtx, pDis->cbInstr);
9971 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
9972 }
9973 break;
9974 }
9975
9976 default:
9977 {
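/* Fall back on the instruction interpreter for anything else; it may modify any part of the guest state, hence HM_CHANGED_ALL_GUEST. */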
9978 VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pMixedCtx), 0 /* pvFault */,
9979 EMCODETYPE_SUPERVISOR);
9980 rc = VBOXSTRICTRC_VAL(rc2);
9981 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
9982 Log4(("#GP rc=%Rrc\n", rc));
9983 break;
9984 }
9985 }
9986 }
9987 else
9988 rc = VERR_EM_INTERPRETER;
9989
9990 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_EM_HALT,
9991 ("#GP Unexpected rc=%Rrc\n", rc));
9992 return rc;
9993}
9994
9995
9996/**
9997 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
9998 * the exception reported in the VMX transient structure back into the VM.
9999 *
10000 * @remarks Requires uExitIntrInfo in the VMX transient structure to be
10001 * up-to-date.
10002 */
10003static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10004{
10005 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
10006
10007 /* Re-inject the exception into the guest. This cannot be a double-fault condition, as that would have been handled in
10008 hmR0VmxCheckExitDueToEventDelivery(). */
10009 int rc = hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
10010 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10011 AssertRCReturn(rc, rc);
10012 Assert(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);
10013
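/* Convert the VM-exit interruption info into VM-entry interruption info and queue the event for injection on the next VM-entry. */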
10014 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
10015 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
10016 return VINF_SUCCESS;
10017}
10018
10019
10020/**
10021 * VM-exit exception handler for #PF (Page-fault exception).
10022 */
10023static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10024{
10025 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
10026 PVM pVM = pVCpu->CTX_SUFF(pVM);
10027 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
10028 rc |= hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
10029 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
10030 AssertRCReturn(rc, rc);
10031
10032#if defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) || defined(HMVMX_ALWAYS_TRAP_PF)
10033 if (pVM->hm.s.fNestedPaging)
10034 {
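/* With nested paging the guest handles its own page faults; we only see this exit because of the ALWAYS_TRAP overrides
   above, so reflect the #PF back to the guest (or a #DF if it occurred while delivering a #PF). */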
10035 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
10036 if (RT_LIKELY(!pVmxTransient->fVectoringPF))
10037 {
10038 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
10039 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
10040 0 /* cbInstr */, pVmxTransient->uExitIntrErrorCode, pVmxTransient->uExitQualification);
10041 }
10042 else
10043 {
10044 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
10045 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
10046 Log4(("Pending #DF due to vectoring #PF. NP\n"));
10047 }
10048 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
10049 return rc;
10050 }
10051#else
10052 Assert(!pVM->hm.s.fNestedPaging);
10053#endif
10054
10055 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10056 AssertRCReturn(rc, rc);
10057
10058 Log4(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification,
10059 pMixedCtx->cs.Sel, pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode, pMixedCtx->cr3));
10060
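/* Hand the fault to PGM: it either resolves it (syncs the shadow page tables or handles the access) and returns
   VINF_SUCCESS, or reports a genuine guest #PF with VINF_EM_RAW_GUEST_TRAP. */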
10061 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntrErrorCode);
10062 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntrErrorCode, CPUMCTX2CORE(pMixedCtx),
10063 (RTGCPTR)pVmxTransient->uExitQualification);
10064
10065 Log4(("#PF: rc=%Rrc\n", rc));
10066 if (rc == VINF_SUCCESS)
10067 {
10068 /* Successfully synced shadow pages tables or emulated an MMIO instruction. */
10069 /** @todo This isn't quite right: what if the guest does LGDT with some MMIO
10070 * memory? We don't update the whole state here... */
10071 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
10072 | HM_CHANGED_VMX_GUEST_APIC_STATE;
10073 TRPMResetTrap(pVCpu);
10074 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
10075 return rc;
10076 }
10077 else if (rc == VINF_EM_RAW_GUEST_TRAP)
10078 {
10079 if (!pVmxTransient->fVectoringPF)
10080 {
10081 /* It's a guest page fault and needs to be reflected to the guest. */
10082 uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
10083 TRPMResetTrap(pVCpu);
10084 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
10085 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
10086 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
10087 0 /* cbInstr */, uGstErrorCode, pVmxTransient->uExitQualification);
10088 }
10089 else
10090 {
10091 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
10092 TRPMResetTrap(pVCpu);
10093 pVCpu->hm.s.Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
10094 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
10095 Log4(("#PF: Pending #DF due to vectoring #PF\n"));
10096 }
10097
10098 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
10099 return VINF_SUCCESS;
10100 }
10101
10102 TRPMResetTrap(pVCpu);
10103 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
10104 return rc;
10105}
10106
10107/** @} */
10108