VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/HM.cpp@43858

Last change on this file since 43858 was 43805, checked in by vboxsync, 12 years ago

VMM/HM: comment clarifications.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 136.1 KB
1/* $Id: HM.cpp 43805 2012-11-05 15:30:40Z vboxsync $ */
2/** @file
3 * HM - Intel/AMD VM Hardware Support Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_HM
22#include <VBox/vmm/cpum.h>
23#include <VBox/vmm/stam.h>
24#include <VBox/vmm/mm.h>
25#include <VBox/vmm/pdmapi.h>
26#include <VBox/vmm/pgm.h>
27#include <VBox/vmm/ssm.h>
28#include <VBox/vmm/trpm.h>
29#include <VBox/vmm/dbgf.h>
30#include <VBox/vmm/iom.h>
31#include <VBox/vmm/patm.h>
32#include <VBox/vmm/csam.h>
33#include <VBox/vmm/selm.h>
34#ifdef VBOX_WITH_REM
35# include <VBox/vmm/rem.h>
36#endif
37#include <VBox/vmm/hm_vmx.h>
38#include <VBox/vmm/hm_svm.h>
39#include "HMInternal.h"
40#include <VBox/vmm/vm.h>
41#include <VBox/err.h>
42#include <VBox/param.h>
43
44#include <iprt/assert.h>
45#include <VBox/log.h>
46#include <iprt/asm.h>
47#include <iprt/asm-amd64-x86.h>
48#include <iprt/string.h>
49#include <iprt/env.h>
50#include <iprt/thread.h>
51
52/*******************************************************************************
53* Global Variables *
54*******************************************************************************/
55#ifdef VBOX_WITH_STATISTICS
56# define EXIT_REASON(def, val, str) #def " - " #val " - " str
57# define EXIT_REASON_NIL() NULL
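/* As a concrete example of the stringizing above, the entry
   EXIT_REASON(VMX_EXIT_CPUID, 10, "Guest software attempted to execute CPUID.")
   expands to the single string literal
   "VMX_EXIT_CPUID - 10 - Guest software attempted to execute CPUID.",
   while EXIT_REASON_NIL() leaves a NULL hole for reserved exit codes. */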
58/** Exit reason descriptions for VT-x, used to describe statistics. */
59static const char * const g_apszVTxExitReasons[MAX_EXITREASON_STAT] =
60{
61 EXIT_REASON(VMX_EXIT_EXCEPTION , 0, "Exception or non-maskable interrupt (NMI)."),
62 EXIT_REASON(VMX_EXIT_EXTERNAL_IRQ , 1, "External interrupt."),
63 EXIT_REASON(VMX_EXIT_TRIPLE_FAULT , 2, "Triple fault."),
64 EXIT_REASON(VMX_EXIT_INIT_SIGNAL , 3, "INIT signal."),
65 EXIT_REASON(VMX_EXIT_SIPI , 4, "Start-up IPI (SIPI)."),
66 EXIT_REASON(VMX_EXIT_IO_SMI_IRQ , 5, "I/O system-management interrupt (SMI)."),
67 EXIT_REASON(VMX_EXIT_SMI_IRQ , 6, "Other SMI."),
68 EXIT_REASON(VMX_EXIT_IRQ_WINDOW , 7, "Interrupt window."),
69 EXIT_REASON_NIL(),
70 EXIT_REASON(VMX_EXIT_TASK_SWITCH , 9, "Task switch."),
71 EXIT_REASON(VMX_EXIT_CPUID , 10, "Guest software attempted to execute CPUID."),
72 EXIT_REASON_NIL(),
73 EXIT_REASON(VMX_EXIT_HLT , 12, "Guest software attempted to execute HLT."),
74 EXIT_REASON(VMX_EXIT_INVD , 13, "Guest software attempted to execute INVD."),
75 EXIT_REASON(VMX_EXIT_INVLPG , 14, "Guest software attempted to execute INVLPG."),
76 EXIT_REASON(VMX_EXIT_RDPMC , 15, "Guest software attempted to execute RDPMC."),
77 EXIT_REASON(VMX_EXIT_RDTSC , 16, "Guest software attempted to execute RDTSC."),
78 EXIT_REASON(VMX_EXIT_RSM , 17, "Guest software attempted to execute RSM in SMM."),
79 EXIT_REASON(VMX_EXIT_VMCALL , 18, "Guest software executed VMCALL."),
80 EXIT_REASON(VMX_EXIT_VMCLEAR , 19, "Guest software executed VMCLEAR."),
81 EXIT_REASON(VMX_EXIT_VMLAUNCH , 20, "Guest software executed VMLAUNCH."),
82 EXIT_REASON(VMX_EXIT_VMPTRLD , 21, "Guest software executed VMPTRLD."),
83 EXIT_REASON(VMX_EXIT_VMPTRST , 22, "Guest software executed VMPTRST."),
84 EXIT_REASON(VMX_EXIT_VMREAD , 23, "Guest software executed VMREAD."),
85 EXIT_REASON(VMX_EXIT_VMRESUME , 24, "Guest software executed VMRESUME."),
86 EXIT_REASON(VMX_EXIT_VMWRITE , 25, "Guest software executed VMWRITE."),
87 EXIT_REASON(VMX_EXIT_VMXOFF , 26, "Guest software executed VMXOFF."),
88 EXIT_REASON(VMX_EXIT_VMXON , 27, "Guest software executed VMXON."),
89 EXIT_REASON(VMX_EXIT_CRX_MOVE , 28, "Control-register accesses."),
90 EXIT_REASON(VMX_EXIT_DRX_MOVE , 29, "Debug-register accesses."),
91 EXIT_REASON(VMX_EXIT_PORT_IO , 30, "I/O instruction."),
92 EXIT_REASON(VMX_EXIT_RDMSR , 31, "RDMSR. Guest software attempted to execute RDMSR."),
93 EXIT_REASON(VMX_EXIT_WRMSR , 32, "WRMSR. Guest software attempted to execute WRMSR."),
94 EXIT_REASON(VMX_EXIT_ERR_INVALID_GUEST_STATE, 33, "VM-entry failure due to invalid guest state."),
95 EXIT_REASON(VMX_EXIT_ERR_MSR_LOAD , 34, "VM-entry failure due to MSR loading."),
96 EXIT_REASON_NIL(),
97 EXIT_REASON(VMX_EXIT_MWAIT , 36, "Guest software executed MWAIT."),
98 EXIT_REASON(VMX_EXIT_MTF , 37, "Monitor Trap Flag."),
99 EXIT_REASON_NIL(),
100 EXIT_REASON(VMX_EXIT_MONITOR , 39, "Guest software attempted to execute MONITOR."),
101 EXIT_REASON(VMX_EXIT_PAUSE , 40, "Guest software attempted to execute PAUSE."),
102 EXIT_REASON(VMX_EXIT_ERR_MACHINE_CHECK , 41, "VM-entry failure due to machine-check."),
103 EXIT_REASON_NIL(),
104 EXIT_REASON(VMX_EXIT_TPR , 43, "TPR below threshold. Guest software executed MOV to CR8."),
105 EXIT_REASON(VMX_EXIT_APIC_ACCESS , 44, "APIC access. Guest software attempted to access memory at a physical address on the APIC-access page."),
106 EXIT_REASON_NIL(),
107 EXIT_REASON(VMX_EXIT_XDTR_ACCESS , 46, "Access to GDTR or IDTR. Guest software attempted to execute LGDT, LIDT, SGDT, or SIDT."),
108 EXIT_REASON(VMX_EXIT_TR_ACCESS , 47, "Access to LDTR or TR. Guest software attempted to execute LLDT, LTR, SLDT, or STR."),
109 EXIT_REASON(VMX_EXIT_EPT_VIOLATION , 48, "EPT violation. An attempt to access memory with a guest-physical address was disallowed by the configuration of the EPT paging structures."),
110 EXIT_REASON(VMX_EXIT_EPT_MISCONFIG , 49, "EPT misconfiguration. An attempt to access memory with a guest-physical address encountered a misconfigured EPT paging-structure entry."),
111 EXIT_REASON(VMX_EXIT_INVEPT , 50, "INVEPT. Guest software attempted to execute INVEPT."),
112 EXIT_REASON(VMX_EXIT_RDTSCP , 51, "Guest software attempted to execute RDTSCP."),
113 EXIT_REASON(VMX_EXIT_PREEMPTION_TIMER , 52, "VMX-preemption timer expired. The preemption timer counted down to zero."),
114 EXIT_REASON(VMX_EXIT_INVVPID , 53, "INVVPID. Guest software attempted to execute INVVPID."),
115 EXIT_REASON(VMX_EXIT_WBINVD , 54, "WBINVD. Guest software attempted to execute WBINVD."),
116 EXIT_REASON(VMX_EXIT_XSETBV , 55, "XSETBV. Guest software attempted to execute XSETBV."),
117 EXIT_REASON_NIL()
118};
119/** Exit reason descriptions for AMD-V, used to describe statistics. */
120static const char * const g_apszAmdVExitReasons[MAX_EXITREASON_STAT] =
121{
122 EXIT_REASON(SVM_EXIT_READ_CR0 , 0, "Read CR0."),
123 EXIT_REASON(SVM_EXIT_READ_CR1 , 1, "Read CR1."),
124 EXIT_REASON(SVM_EXIT_READ_CR2 , 2, "Read CR2."),
125 EXIT_REASON(SVM_EXIT_READ_CR3 , 3, "Read CR3."),
126 EXIT_REASON(SVM_EXIT_READ_CR4 , 4, "Read CR4."),
127 EXIT_REASON(SVM_EXIT_READ_CR5 , 5, "Read CR5."),
128 EXIT_REASON(SVM_EXIT_READ_CR6 , 6, "Read CR6."),
129 EXIT_REASON(SVM_EXIT_READ_CR7 , 7, "Read CR7."),
130 EXIT_REASON(SVM_EXIT_READ_CR8 , 8, "Read CR8."),
131 EXIT_REASON(SVM_EXIT_READ_CR9 , 9, "Read CR9."),
132 EXIT_REASON(SVM_EXIT_READ_CR10 , 10, "Read CR10."),
133 EXIT_REASON(SVM_EXIT_READ_CR11 , 11, "Read CR11."),
134 EXIT_REASON(SVM_EXIT_READ_CR12 , 12, "Read CR12."),
135 EXIT_REASON(SVM_EXIT_READ_CR13 , 13, "Read CR13."),
136 EXIT_REASON(SVM_EXIT_READ_CR14 , 14, "Read CR14."),
137 EXIT_REASON(SVM_EXIT_READ_CR15 , 15, "Read CR15."),
138 EXIT_REASON(SVM_EXIT_WRITE_CR0 , 16, "Write CR0."),
139 EXIT_REASON(SVM_EXIT_WRITE_CR1 , 17, "Write CR1."),
140 EXIT_REASON(SVM_EXIT_WRITE_CR2 , 18, "Write CR2."),
141 EXIT_REASON(SVM_EXIT_WRITE_CR3 , 19, "Write CR3."),
142 EXIT_REASON(SVM_EXIT_WRITE_CR4 , 20, "Write CR4."),
143 EXIT_REASON(SVM_EXIT_WRITE_CR5 , 21, "Write CR5."),
144 EXIT_REASON(SVM_EXIT_WRITE_CR6 , 22, "Write CR6."),
145 EXIT_REASON(SVM_EXIT_WRITE_CR7 , 23, "Write CR7."),
146 EXIT_REASON(SVM_EXIT_WRITE_CR8 , 24, "Write CR8."),
147 EXIT_REASON(SVM_EXIT_WRITE_CR9 , 25, "Write CR9."),
148 EXIT_REASON(SVM_EXIT_WRITE_CR10 , 26, "Write CR10."),
149 EXIT_REASON(SVM_EXIT_WRITE_CR11 , 27, "Write CR11."),
150 EXIT_REASON(SVM_EXIT_WRITE_CR12 , 28, "Write CR12."),
151 EXIT_REASON(SVM_EXIT_WRITE_CR13 , 29, "Write CR13."),
152 EXIT_REASON(SVM_EXIT_WRITE_CR14 , 30, "Write CR14."),
153 EXIT_REASON(SVM_EXIT_WRITE_CR15 , 31, "Write CR15."),
154 EXIT_REASON(SVM_EXIT_READ_DR0 , 32, "Read DR0."),
155 EXIT_REASON(SVM_EXIT_READ_DR1 , 33, "Read DR1."),
156 EXIT_REASON(SVM_EXIT_READ_DR2 , 34, "Read DR2."),
157 EXIT_REASON(SVM_EXIT_READ_DR3 , 35, "Read DR3."),
158 EXIT_REASON(SVM_EXIT_READ_DR4 , 36, "Read DR4."),
159 EXIT_REASON(SVM_EXIT_READ_DR5 , 37, "Read DR5."),
160 EXIT_REASON(SVM_EXIT_READ_DR6 , 38, "Read DR6."),
161 EXIT_REASON(SVM_EXIT_READ_DR7 , 39, "Read DR7."),
162 EXIT_REASON(SVM_EXIT_READ_DR8 , 40, "Read DR8."),
163 EXIT_REASON(SVM_EXIT_READ_DR9 , 41, "Read DR9."),
164 EXIT_REASON(SVM_EXIT_READ_DR10 , 42, "Read DR10."),
165 EXIT_REASON(SVM_EXIT_READ_DR11 , 43, "Read DR11."),
166 EXIT_REASON(SVM_EXIT_READ_DR12 , 44, "Read DR12."),
167 EXIT_REASON(SVM_EXIT_READ_DR13 , 45, "Read DR13."),
168 EXIT_REASON(SVM_EXIT_READ_DR14 , 46, "Read DR14."),
169 EXIT_REASON(SVM_EXIT_READ_DR15 , 47, "Read DR15."),
170 EXIT_REASON(SVM_EXIT_WRITE_DR0 , 48, "Write DR0."),
171 EXIT_REASON(SVM_EXIT_WRITE_DR1 , 49, "Write DR1."),
172 EXIT_REASON(SVM_EXIT_WRITE_DR2 , 50, "Write DR2."),
173 EXIT_REASON(SVM_EXIT_WRITE_DR3 , 51, "Write DR3."),
174 EXIT_REASON(SVM_EXIT_WRITE_DR4 , 52, "Write DR4."),
175 EXIT_REASON(SVM_EXIT_WRITE_DR5 , 53, "Write DR5."),
176 EXIT_REASON(SVM_EXIT_WRITE_DR6 , 54, "Write DR6."),
177 EXIT_REASON(SVM_EXIT_WRITE_DR7 , 55, "Write DR7."),
178 EXIT_REASON(SVM_EXIT_WRITE_DR8 , 56, "Write DR8."),
179 EXIT_REASON(SVM_EXIT_WRITE_DR9 , 57, "Write DR9."),
180 EXIT_REASON(SVM_EXIT_WRITE_DR10 , 58, "Write DR10."),
181 EXIT_REASON(SVM_EXIT_WRITE_DR11 , 59, "Write DR11."),
182 EXIT_REASON(SVM_EXIT_WRITE_DR12 , 60, "Write DR12."),
183 EXIT_REASON(SVM_EXIT_WRITE_DR13 , 61, "Write DR13."),
184 EXIT_REASON(SVM_EXIT_WRITE_DR14 , 62, "Write DR14."),
185 EXIT_REASON(SVM_EXIT_WRITE_DR15 , 63, "Write DR15."),
186 EXIT_REASON(SVM_EXIT_EXCEPTION_0 , 64, "Exception Vector 0 (0x0)."),
187 EXIT_REASON(SVM_EXIT_EXCEPTION_1 , 65, "Exception Vector 1 (0x1)."),
188 EXIT_REASON(SVM_EXIT_EXCEPTION_2 , 66, "Exception Vector 2 (0x2)."),
189 EXIT_REASON(SVM_EXIT_EXCEPTION_3 , 67, "Exception Vector 3 (0x3)."),
190 EXIT_REASON(SVM_EXIT_EXCEPTION_4 , 68, "Exception Vector 4 (0x4)."),
191 EXIT_REASON(SVM_EXIT_EXCEPTION_5 , 69, "Exception Vector 5 (0x5)."),
192 EXIT_REASON(SVM_EXIT_EXCEPTION_6 , 70, "Exception Vector 6 (0x6)."),
193 EXIT_REASON(SVM_EXIT_EXCEPTION_7 , 71, "Exception Vector 7 (0x7)."),
194 EXIT_REASON(SVM_EXIT_EXCEPTION_8 , 72, "Exception Vector 8 (0x8)."),
195 EXIT_REASON(SVM_EXIT_EXCEPTION_9 , 73, "Exception Vector 9 (0x9)."),
196 EXIT_REASON(SVM_EXIT_EXCEPTION_A , 74, "Exception Vector 10 (0xA)."),
197 EXIT_REASON(SVM_EXIT_EXCEPTION_B , 75, "Exception Vector 11 (0xB)."),
198 EXIT_REASON(SVM_EXIT_EXCEPTION_C , 76, "Exception Vector 12 (0xC)."),
199 EXIT_REASON(SVM_EXIT_EXCEPTION_D , 77, "Exception Vector 13 (0xD)."),
200 EXIT_REASON(SVM_EXIT_EXCEPTION_E , 78, "Exception Vector 14 (0xE)."),
201 EXIT_REASON(SVM_EXIT_EXCEPTION_F , 79, "Exception Vector 15 (0xF)."),
202 EXIT_REASON(SVM_EXIT_EXCEPTION_10 , 80, "Exception Vector 16 (0x10)."),
203 EXIT_REASON(SVM_EXIT_EXCEPTION_11 , 81, "Exception Vector 17 (0x11)."),
204 EXIT_REASON(SVM_EXIT_EXCEPTION_12 , 82, "Exception Vector 18 (0x12)."),
205 EXIT_REASON(SVM_EXIT_EXCEPTION_13 , 83, "Exception Vector 19 (0x13)."),
206 EXIT_REASON(SVM_EXIT_EXCEPTION_14 , 84, "Exception Vector 20 (0x14)."),
207 EXIT_REASON(SVM_EXIT_EXCEPTION_15 , 85, "Exception Vector 21 (0x15)."),
208 EXIT_REASON(SVM_EXIT_EXCEPTION_16 , 86, "Exception Vector 22 (0x16)."),
209 EXIT_REASON(SVM_EXIT_EXCEPTION_17 , 87, "Exception Vector 23 (0x17)."),
210 EXIT_REASON(SVM_EXIT_EXCEPTION_18 , 88, "Exception Vector 24 (0x18)."),
211 EXIT_REASON(SVM_EXIT_EXCEPTION_19 , 89, "Exception Vector 25 (0x19)."),
212 EXIT_REASON(SVM_EXIT_EXCEPTION_1A , 90, "Exception Vector 26 (0x1A)."),
213 EXIT_REASON(SVM_EXIT_EXCEPTION_1B , 91, "Exception Vector 27 (0x1B)."),
214 EXIT_REASON(SVM_EXIT_EXCEPTION_1C , 92, "Exception Vector 28 (0x1C)."),
215 EXIT_REASON(SVM_EXIT_EXCEPTION_1D , 93, "Exception Vector 29 (0x1D)."),
216 EXIT_REASON(SVM_EXIT_EXCEPTION_1E , 94, "Exception Vector 30 (0x1E)."),
217 EXIT_REASON(SVM_EXIT_EXCEPTION_1F , 95, "Exception Vector 31 (0x1F)."),
218 EXIT_REASON(SVM_EXIT_INTR , 96, "Physical maskable interrupt."),
219 EXIT_REASON(SVM_EXIT_NMI , 97, "Physical non-maskable interrupt."),
220 EXIT_REASON(SVM_EXIT_SMI , 98, "System management interrupt."),
221 EXIT_REASON(SVM_EXIT_INIT , 99, "Physical INIT signal."),
222 EXIT_REASON(SVM_EXIT_VINTR ,100, "Virtual interrupt."),
223 EXIT_REASON(SVM_EXIT_CR0_SEL_WRITE ,101, "Write to CR0 that changed any bits other than CR0.TS or CR0.MP."),
224 EXIT_REASON(SVM_EXIT_IDTR_READ ,102, "Read IDTR."),
225 EXIT_REASON(SVM_EXIT_GDTR_READ ,103, "Read GDTR."),
226 EXIT_REASON(SVM_EXIT_LDTR_READ ,104, "Read LDTR."),
227 EXIT_REASON(SVM_EXIT_TR_READ ,105, "Read TR."),
228 EXIT_REASON(SVM_EXIT_IDTR_WRITE ,106, "Write IDTR."),
229 EXIT_REASON(SVM_EXIT_GDTR_WRITE ,107, "Write GDTR."),
230 EXIT_REASON(SVM_EXIT_LDTR_WRITE ,108, "Write LDTR."),
231 EXIT_REASON(SVM_EXIT_TR_WRITE ,109, "Write TR."),
232 EXIT_REASON(SVM_EXIT_RDTSC ,110, "RDTSC instruction."),
233 EXIT_REASON(SVM_EXIT_RDPMC ,111, "RDPMC instruction."),
234 EXIT_REASON(SVM_EXIT_PUSHF ,112, "PUSHF instruction."),
235 EXIT_REASON(SVM_EXIT_POPF ,113, "POPF instruction."),
236 EXIT_REASON(SVM_EXIT_CPUID ,114, "CPUID instruction."),
237 EXIT_REASON(SVM_EXIT_RSM ,115, "RSM instruction."),
238 EXIT_REASON(SVM_EXIT_IRET ,116, "IRET instruction."),
239 EXIT_REASON(SVM_EXIT_SWINT ,117, "Software interrupt (INTn instructions)."),
240 EXIT_REASON(SVM_EXIT_INVD ,118, "INVD instruction."),
241 EXIT_REASON(SVM_EXIT_PAUSE ,119, "PAUSE instruction."),
242 EXIT_REASON(SVM_EXIT_HLT ,120, "HLT instruction."),
243 EXIT_REASON(SVM_EXIT_INVLPG ,121, "INVLPG instruction."),
244 EXIT_REASON(SVM_EXIT_INVLPGA ,122, "INVLPGA instruction."),
245 EXIT_REASON(SVM_EXIT_IOIO ,123, "IN/OUT accessing protected port (EXITINFO1 field provides more information)."),
246 EXIT_REASON(SVM_EXIT_MSR ,124, "RDMSR or WRMSR access to protected MSR."),
247 EXIT_REASON(SVM_EXIT_TASK_SWITCH ,125, "Task switch."),
248 EXIT_REASON(SVM_EXIT_FERR_FREEZE ,126, "FP legacy handling enabled, and processor is frozen in an x87/MMX instruction waiting for an interrupt."),
249 EXIT_REASON(SVM_EXIT_SHUTDOWN ,127, "Shutdown."),
250 EXIT_REASON(SVM_EXIT_VMRUN ,128, "VMRUN instruction."),
251 EXIT_REASON(SVM_EXIT_VMMCALL ,129, "VMMCALL instruction."),
252 EXIT_REASON(SVM_EXIT_VMLOAD ,130, "VMLOAD instruction."),
253 EXIT_REASON(SVM_EXIT_VMSAVE ,131, "VMSAVE instruction."),
254 EXIT_REASON(SVM_EXIT_STGI ,132, "STGI instruction."),
255 EXIT_REASON(SVM_EXIT_CLGI ,133, "CLGI instruction."),
256 EXIT_REASON(SVM_EXIT_SKINIT ,134, "SKINIT instruction."),
257 EXIT_REASON(SVM_EXIT_RDTSCP ,135, "RDTSCP instruction."),
258 EXIT_REASON(SVM_EXIT_ICEBP ,136, "ICEBP instruction."),
259 EXIT_REASON(SVM_EXIT_WBINVD ,137, "WBINVD instruction."),
260 EXIT_REASON(SVM_EXIT_MONITOR ,138, "MONITOR instruction."),
261 EXIT_REASON(SVM_EXIT_MWAIT_UNCOND ,139, "MWAIT instruction (unconditional)."),
262 EXIT_REASON(SVM_EXIT_MWAIT_ARMED ,140, "MWAIT instruction (when monitor hardware is armed)."),
263 EXIT_REASON(SVM_EXIT_NPF ,1024, "Nested paging: host-level page fault occurred (EXITINFO1 contains fault errorcode; EXITINFO2 contains the guest physical address causing the fault)."),
264 EXIT_REASON_NIL()
265};
266# undef EXIT_REASON
267# undef EXIT_REASON_NIL
268#endif /* VBOX_WITH_STATISTICS */
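/* The tables above are indexed directly by the raw exit code, so a reserved code
   simply yields NULL.  A minimal lookup sketch (hmR3GetExitName is a hypothetical
   helper, not part of this file; the table selection mirrors the ASMIsIntelCpu()
   check used when registering the statistics below):

   static const char *hmR3GetExitName(bool fIntel, unsigned uExitReason)
   {
       const char * const *papszDesc = fIntel ? g_apszVTxExitReasons : g_apszAmdVExitReasons;
       return uExitReason < MAX_EXITREASON_STAT ? papszDesc[uExitReason] : NULL;
   }
*/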
269
270/*******************************************************************************
271* Internal Functions *
272*******************************************************************************/
273static DECLCALLBACK(int) hmR3Save(PVM pVM, PSSMHANDLE pSSM);
274static DECLCALLBACK(int) hmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
275static int hmR3InitCPU(PVM pVM);
276static int hmR3InitFinalizeR0(PVM pVM);
277static int hmR3TermCPU(PVM pVM);
278
279
280/**
281 * Initializes the HM.
282 *
283 * @returns VBox status code.
284 * @param pVM Pointer to the VM.
285 */
286VMMR3DECL(int) HMR3Init(PVM pVM)
287{
288 LogFlow(("HMR3Init\n"));
289
290 /*
291 * Assert alignment and sizes.
292 */
293 AssertCompileMemberAlignment(VM, hm.s, 32);
294 AssertCompile(sizeof(pVM->hm.s) <= sizeof(pVM->hm.padding));
295
296 /* Some structure checks. */
297 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.EventInject) == 0xA8, ("ctrl.EventInject offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.EventInject)));
298 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.ExitIntInfo) == 0x88, ("ctrl.ExitIntInfo offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.ExitIntInfo)));
299 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.TLBCtrl) == 0x58, ("ctrl.TLBCtrl offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.TLBCtrl)));
300
301 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest) == 0x400, ("guest offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest)));
302 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.TR) == 0x490, ("guest.TR offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.TR)));
303 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u8CPL) == 0x4CB, ("guest.u8CPL offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8CPL)));
304 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64EFER) == 0x4D0, ("guest.u64EFER offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64EFER)));
305 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64CR4) == 0x548, ("guest.u64CR4 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64CR4)));
306 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64RIP) == 0x578, ("guest.u64RIP offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64RIP)));
307 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64RSP) == 0x5D8, ("guest.u64RSP offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64RSP)));
308 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64CR2) == 0x640, ("guest.u64CR2 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64CR2)));
309 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64GPAT) == 0x668, ("guest.u64GPAT offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64GPAT)));
310 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64LASTEXCPTO) == 0x690, ("guest.u64LASTEXCPTO offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64LASTEXCPTO)));
311 AssertReleaseMsg(sizeof(SVM_VMCB) == 0x1000, ("SVM_VMCB size = %x\n", sizeof(SVM_VMCB)));
312
313 /*
314 * Register the saved state data unit.
315 */
316 int rc = SSMR3RegisterInternal(pVM, "HWACCM", 0, HM_SSM_VERSION, sizeof(HM),
317 NULL, NULL, NULL,
318 NULL, hmR3Save, NULL,
319 NULL, hmR3Load, NULL);
320 if (RT_FAILURE(rc))
321 return rc;
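/* The save/load callbacks registered above receive the same SSM handle and stream
   fields with the SSMR3PutXxx / SSMR3GetXxx family.  A minimal sketch of the save
   side (illustrative only; hmR3SaveSketch and its field choices are hypothetical,
   the real unit layout lives in hmR3Save further down in this file):

   static DECLCALLBACK(int) hmR3SaveSketch(PVM pVM, PSSMHANDLE pSSM)
   {
       SSMR3PutBool(pSSM, pVM->hm.s.vmx.fSupported);        // hypothetical field choice
       return SSMR3PutBool(pSSM, pVM->hm.s.svm.fSupported); // last status is returned
   }
*/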
322
323 /* Misc initialisation. */
324 pVM->hm.s.vmx.fSupported = false;
325 pVM->hm.s.svm.fSupported = false;
326 pVM->hm.s.vmx.fEnabled = false;
327 pVM->hm.s.svm.fEnabled = false;
328
329 pVM->hm.s.fNestedPaging = false;
330 pVM->hm.s.fLargePages = false;
331
332 /* Disabled by default. */
333 pVM->fHMEnabled = false;
334
335 /*
336 * Check CFGM options.
337 */
338 PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
339 PCFGMNODE pHWVirtExt = CFGMR3GetChild(pRoot, "HWVirtExt/");
340 /* Nested paging: disabled by default. */
341 rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableNestedPaging", &pVM->hm.s.fAllowNestedPaging, false);
342 AssertRC(rc);
343
344 /* Large pages: disabled by default. */
345 rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableLargePages", &pVM->hm.s.fLargePages, false);
346 AssertRC(rc);
347
348 /* VT-x VPID: disabled by default. */
349 rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableVPID", &pVM->hm.s.vmx.fAllowVpid, false);
350 AssertRC(rc);
351
352 /* HM support must be explicitly enabled in the configuration file. */
353 rc = CFGMR3QueryBoolDef(pHWVirtExt, "Enabled", &pVM->hm.s.fAllowed, false);
354 AssertRC(rc);
355
356 /* TPR patching for 32-bit (Windows) guests with an IO-APIC: disabled by default. */
357 rc = CFGMR3QueryBoolDef(pHWVirtExt, "TPRPatchingEnabled", &pVM->hm.s.fTRPPatchingAllowed, false);
358 AssertRC(rc);
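/* All of the keys queried above live under the VM's "HWVirtExt/" CFGM node.  From
   the host shell they can be set per-VM through the extra-data mechanism, e.g.
   (assuming the usual VBoxInternal/ mapping also referenced for 64bitEnabled below;
   "MyVM" is a placeholder VM name):

       VBoxManage setextradata "MyVM" "VBoxInternal/HWVirtExt/EnableNestedPaging" 1
*/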
359
360#ifdef RT_OS_DARWIN
361 if (VMMIsHwVirtExtForced(pVM) != pVM->hm.s.fAllowed)
362#else
363 if (VMMIsHwVirtExtForced(pVM) && !pVM->hm.s.fAllowed)
364#endif
365 {
366 AssertLogRelMsgFailed(("VMMIsHwVirtExtForced=%RTbool fAllowed=%RTbool\n",
367 VMMIsHwVirtExtForced(pVM), pVM->hm.s.fAllowed));
368 return VERR_HM_CONFIG_MISMATCH;
369 }
370
371 if (VMMIsHwVirtExtForced(pVM))
372 pVM->fHMEnabled = true;
373
374#if HC_ARCH_BITS == 32
375 /*
376 * 64-bit mode is configurable and it depends on both the kernel mode and VT-x.
377 * (To use the default, don't set 64bitEnabled in CFGM.)
378 */
379 rc = CFGMR3QueryBoolDef(pHWVirtExt, "64bitEnabled", &pVM->hm.s.fAllow64BitGuests, false);
380 AssertLogRelRCReturn(rc, rc);
381 if (pVM->hm.s.fAllow64BitGuests)
382 {
383# ifdef RT_OS_DARWIN
384 if (!VMMIsHwVirtExtForced(pVM))
385# else
386 if (!pVM->hm.s.fAllowed)
387# endif
388 return VM_SET_ERROR(pVM, VERR_INVALID_PARAMETER, "64-bit guest support was requested without also enabling HWVirtEx (VT-x/AMD-V).");
389 }
390#else
391 /*
392 * On 64-bit hosts 64-bit guest support is enabled by default, but allow this to be overridden
393 * via VBoxInternal/HWVirtExt/64bitEnabled=0. (ConsoleImpl2.cpp doesn't set this to false for 64-bit.) */
394 */
395 rc = CFGMR3QueryBoolDef(pHWVirtExt, "64bitEnabled", &pVM->hm.s.fAllow64BitGuests, true);
396 AssertLogRelRCReturn(rc, rc);
397#endif
398
399
400 /*
401 * Determine the init method for AMD-V and VT-x; either one global init for each host CPU
402 * or local init each time we wish to execute guest code.
403 *
404 * Default false for Mac OS X and Windows due to the higher risk of conflicts with other hypervisors.
405 */
406 rc = CFGMR3QueryBoolDef(pHWVirtExt, "Exclusive", &pVM->hm.s.fGlobalInit,
407#if defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS)
408 false
409#else
410 true
411#endif
412 );
413
414 /* Max number of resume loops. */
415 rc = CFGMR3QueryU32Def(pHWVirtExt, "MaxResumeLoops", &pVM->hm.s.cMaxResumeLoops, 0 /* set by R0 later */);
416 AssertRC(rc);
417
418 return rc;
419}
420
421
422/**
423 * Initializes the per-VCPU HM.
424 *
425 * @returns VBox status code.
426 * @param pVM Pointer to the VM.
427 */
428static int hmR3InitCPU(PVM pVM)
429{
430 LogFlow(("HMR3InitCPU\n"));
431
432 for (VMCPUID i = 0; i < pVM->cCpus; i++)
433 {
434 PVMCPU pVCpu = &pVM->aCpus[i];
435
436 pVCpu->hm.s.fActive = false;
437 }
438
439#ifdef VBOX_WITH_STATISTICS
440 STAM_REG(pVM, &pVM->hm.s.StatTprPatchSuccess, STAMTYPE_COUNTER, "/HM/TPR/Patch/Success", STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched.");
441 STAM_REG(pVM, &pVM->hm.s.StatTprPatchFailure, STAMTYPE_COUNTER, "/HM/TPR/Patch/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts.");
442 STAM_REG(pVM, &pVM->hm.s.StatTprReplaceSuccess, STAMTYPE_COUNTER, "/HM/TPR/Replace/Success",STAMUNIT_OCCURENCES, "Number of times an instruction was successfully replaced.");
443 STAM_REG(pVM, &pVM->hm.s.StatTprReplaceFailure, STAMTYPE_COUNTER, "/HM/TPR/Replace/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful replacement attempts.");
444
445 /*
446 * Statistics.
447 */
448 for (VMCPUID i = 0; i < pVM->cCpus; i++)
449 {
450 PVMCPU pVCpu = &pVM->aCpus[i];
451 int rc;
452
453 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
454 "Profiling of RTMpPokeCpu",
455 "/PROF/HM/CPU%d/Poke", i);
456 AssertRC(rc);
457 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatSpinPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
458 "Profiling of poke wait",
459 "/PROF/HM/CPU%d/PokeWait", i);
460 AssertRC(rc);
461 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatSpinPokeFailed, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
462 "Profiling of poke wait when RTMpPokeCpu fails",
463 "/PROF/HM/CPU%d/PokeWaitFailed", i);
464 AssertRC(rc);
465 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatEntry, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
466 "Profiling of VMXR0RunGuestCode entry",
467 "/PROF/HM/CPU%d/SwitchToGC", i);
468 AssertRC(rc);
469 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExit1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
470 "Profiling of VMXR0RunGuestCode exit part 1",
471 "/PROF/HM/CPU%d/SwitchFromGC_1", i);
472 AssertRC(rc);
473 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExit2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
474 "Profiling of VMXR0RunGuestCode exit part 2",
475 "/PROF/HM/CPU%d/SwitchFromGC_2", i);
476 AssertRC(rc);
477# if 1 /* temporary for tracking down darwin holdup. */
478 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExit2Sub1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
479 "Temporary - I/O",
480 "/PROF/HM/CPU%d/SwitchFromGC_2/Sub1", i);
481 AssertRC(rc);
482 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExit2Sub2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
483 "Temporary - CRx RWs",
484 "/PROF/HM/CPU%d/SwitchFromGC_2/Sub2", i);
485 AssertRC(rc);
486 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExit2Sub3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
487 "Temporary - Exceptions",
488 "/PROF/HM/CPU%d/SwitchFromGC_2/Sub3", i);
489 AssertRC(rc);
490# endif
491 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatInGC, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
492 "Profiling of vmlaunch",
493 "/PROF/HM/CPU%d/InGC", i);
494 AssertRC(rc);
495
496# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
497 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatWorldSwitch3264, STAMTYPE_PROFILE, STAMVISIBILITY_USED,
498 STAMUNIT_TICKS_PER_CALL, "Profiling of the 32/64 switcher",
499 "/PROF/HM/CPU%d/Switcher3264", i);
500 AssertRC(rc);
501# endif
502
503# define HM_REG_COUNTER(a, b) \
504 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Occurrence counter (see the statistic name)", b, i); \
505 AssertRC(rc);
506
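/* For example, HM_REG_COUNTER(&pVCpu->hm.s.StatExitInvlpg, "/HM/CPU%d/Exit/Instr/Invlpg")
   expands to a STAMR3RegisterF call that formats the per-VCPU index 'i' into the
   name, registering "/HM/CPU0/Exit/Instr/Invlpg" for VCPU 0, and so on. */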
507 HM_REG_COUNTER(&pVCpu->hm.s.StatExitShadowNM, "/HM/CPU%d/Exit/Trap/Shw/#NM");
508 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestNM, "/HM/CPU%d/Exit/Trap/Gst/#NM");
509 HM_REG_COUNTER(&pVCpu->hm.s.StatExitShadowPF, "/HM/CPU%d/Exit/Trap/Shw/#PF");
510 HM_REG_COUNTER(&pVCpu->hm.s.StatExitShadowPFEM, "/HM/CPU%d/Exit/Trap/Shw/#PF-EM");
511 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestPF, "/HM/CPU%d/Exit/Trap/Gst/#PF");
512 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestUD, "/HM/CPU%d/Exit/Trap/Gst/#UD");
513 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestSS, "/HM/CPU%d/Exit/Trap/Gst/#SS");
514 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestNP, "/HM/CPU%d/Exit/Trap/Gst/#NP");
515 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestGP, "/HM/CPU%d/Exit/Trap/Gst/#GP");
516 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestMF, "/HM/CPU%d/Exit/Trap/Gst/#MF");
517 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestDE, "/HM/CPU%d/Exit/Trap/Gst/#DE");
518 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestDB, "/HM/CPU%d/Exit/Trap/Gst/#DB");
519 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestBP, "/HM/CPU%d/Exit/Trap/Gst/#BP");
520 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestXF, "/HM/CPU%d/Exit/Trap/Gst/#XF");
521 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestXcpUnk, "/HM/CPU%d/Exit/Trap/Gst/Other");
522 HM_REG_COUNTER(&pVCpu->hm.s.StatExitInvlpg, "/HM/CPU%d/Exit/Instr/Invlpg");
523 HM_REG_COUNTER(&pVCpu->hm.s.StatExitInvd, "/HM/CPU%d/Exit/Instr/Invd");
524 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCpuid, "/HM/CPU%d/Exit/Instr/Cpuid");
525 HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdtsc, "/HM/CPU%d/Exit/Instr/Rdtsc");
526 HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdtscp, "/HM/CPU%d/Exit/Instr/Rdtscp");
527 HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdpmc, "/HM/CPU%d/Exit/Instr/Rdpmc");
528 HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdmsr, "/HM/CPU%d/Exit/Instr/Rdmsr");
529 HM_REG_COUNTER(&pVCpu->hm.s.StatExitWrmsr, "/HM/CPU%d/Exit/Instr/Wrmsr");
530 HM_REG_COUNTER(&pVCpu->hm.s.StatExitMwait, "/HM/CPU%d/Exit/Instr/Mwait");
531 HM_REG_COUNTER(&pVCpu->hm.s.StatExitMonitor, "/HM/CPU%d/Exit/Instr/Monitor");
532 HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxWrite, "/HM/CPU%d/Exit/Instr/DR/Write");
533 HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxRead, "/HM/CPU%d/Exit/Instr/DR/Read");
534 HM_REG_COUNTER(&pVCpu->hm.s.StatExitClts, "/HM/CPU%d/Exit/Instr/CLTS");
535 HM_REG_COUNTER(&pVCpu->hm.s.StatExitLMSW, "/HM/CPU%d/Exit/Instr/LMSW");
536 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCli, "/HM/CPU%d/Exit/Instr/Cli");
537 HM_REG_COUNTER(&pVCpu->hm.s.StatExitSti, "/HM/CPU%d/Exit/Instr/Sti");
538 HM_REG_COUNTER(&pVCpu->hm.s.StatExitPushf, "/HM/CPU%d/Exit/Instr/Pushf");
539 HM_REG_COUNTER(&pVCpu->hm.s.StatExitPopf, "/HM/CPU%d/Exit/Instr/Popf");
540 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIret, "/HM/CPU%d/Exit/Instr/Iret");
541 HM_REG_COUNTER(&pVCpu->hm.s.StatExitInt, "/HM/CPU%d/Exit/Instr/Int");
542 HM_REG_COUNTER(&pVCpu->hm.s.StatExitHlt, "/HM/CPU%d/Exit/Instr/Hlt");
543 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOWrite, "/HM/CPU%d/Exit/IO/Write");
544 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIORead, "/HM/CPU%d/Exit/IO/Read");
545 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringWrite, "/HM/CPU%d/Exit/IO/WriteString");
546 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringRead, "/HM/CPU%d/Exit/IO/ReadString");
547 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIrqWindow, "/HM/CPU%d/Exit/IrqWindow");
548 HM_REG_COUNTER(&pVCpu->hm.s.StatExitMaxResume, "/HM/CPU%d/Exit/MaxResume");
549 HM_REG_COUNTER(&pVCpu->hm.s.StatExitPreemptPending, "/HM/CPU%d/Exit/PreemptPending");
550 HM_REG_COUNTER(&pVCpu->hm.s.StatExitMtf, "/HM/CPU%d/Exit/MonitorTrapFlag");
551
552 HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchGuestIrq, "/HM/CPU%d/Switch/IrqPending");
553 HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchToR3, "/HM/CPU%d/Switch/ToR3");
554
555 HM_REG_COUNTER(&pVCpu->hm.s.StatIntInject, "/HM/CPU%d/Irq/Inject");
556 HM_REG_COUNTER(&pVCpu->hm.s.StatIntReinject, "/HM/CPU%d/Irq/Reinject");
557 HM_REG_COUNTER(&pVCpu->hm.s.StatPendingHostIrq, "/HM/CPU%d/Irq/PendingOnHost");
558
559 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushPage, "/HM/CPU%d/Flush/Page");
560 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushPageManual, "/HM/CPU%d/Flush/Page/Virt");
561 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushPhysPageManual, "/HM/CPU%d/Flush/Page/Phys");
562 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTlb, "/HM/CPU%d/Flush/TLB");
563 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTlbManual, "/HM/CPU%d/Flush/TLB/Manual");
564 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTlbCRxChange, "/HM/CPU%d/Flush/TLB/CRx");
565 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushPageInvlpg, "/HM/CPU%d/Flush/Page/Invlpg");
566 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTlbWorldSwitch, "/HM/CPU%d/Flush/TLB/Switch");
567 HM_REG_COUNTER(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch, "/HM/CPU%d/Flush/TLB/Skipped");
568 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushAsid, "/HM/CPU%d/Flush/TLB/ASID");
569 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushNestedPaging, "/HM/CPU%d/Flush/TLB/NestedPaging");
570 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTlbInvlpga, "/HM/CPU%d/Flush/TLB/PhysInvl");
571 HM_REG_COUNTER(&pVCpu->hm.s.StatTlbShootdown, "/HM/CPU%d/Flush/Shootdown/Page");
572 HM_REG_COUNTER(&pVCpu->hm.s.StatTlbShootdownFlush, "/HM/CPU%d/Flush/Shootdown/TLB");
573
574 HM_REG_COUNTER(&pVCpu->hm.s.StatTscOffset, "/HM/CPU%d/TSC/Offset");
575 HM_REG_COUNTER(&pVCpu->hm.s.StatTscIntercept, "/HM/CPU%d/TSC/Intercept");
576 HM_REG_COUNTER(&pVCpu->hm.s.StatTscInterceptOverFlow, "/HM/CPU%d/TSC/InterceptOverflow");
577
578 HM_REG_COUNTER(&pVCpu->hm.s.StatDRxArmed, "/HM/CPU%d/Debug/Armed");
579 HM_REG_COUNTER(&pVCpu->hm.s.StatDRxContextSwitch, "/HM/CPU%d/Debug/ContextSwitch");
580 HM_REG_COUNTER(&pVCpu->hm.s.StatDRxIoCheck, "/HM/CPU%d/Debug/IOCheck");
581
582 HM_REG_COUNTER(&pVCpu->hm.s.StatLoadMinimal, "/HM/CPU%d/Load/Minimal");
583 HM_REG_COUNTER(&pVCpu->hm.s.StatLoadFull, "/HM/CPU%d/Load/Full");
584
585#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
586 HM_REG_COUNTER(&pVCpu->hm.s.StatFpu64SwitchBack, "/HM/CPU%d/Switch64/Fpu");
587 HM_REG_COUNTER(&pVCpu->hm.s.StatDebug64SwitchBack, "/HM/CPU%d/Switch64/Debug");
588#endif
589
590 for (unsigned j = 0; j < RT_ELEMENTS(pVCpu->hm.s.StatExitCRxWrite); j++)
591 {
592 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitCRxWrite[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
593 STAMUNIT_OCCURENCES, "Number of CRx writes",
594 "/HM/CPU%d/Exit/Instr/CR/Write/%x", i, j);
595 AssertRC(rc);
596 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitCRxRead[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
597 STAMUNIT_OCCURENCES, "Number of CRx reads",
598 "/HM/CPU%d/Exit/Instr/CR/Read/%x", i, j);
599 AssertRC(rc);
600 }
601
602#undef HM_REG_COUNTER
603
604 pVCpu->hm.s.paStatExitReason = NULL;
605
606 rc = MMHyperAlloc(pVM, MAX_EXITREASON_STAT*sizeof(*pVCpu->hm.s.paStatExitReason), 0, MM_TAG_HM,
607 (void **)&pVCpu->hm.s.paStatExitReason);
608 AssertRC(rc);
609 if (RT_SUCCESS(rc))
610 {
611 const char * const *papszDesc = ASMIsIntelCpu() ? &g_apszVTxExitReasons[0] : &g_apszAmdVExitReasons[0];
612 for (int j = 0; j < MAX_EXITREASON_STAT; j++)
613 {
614 if (papszDesc[j])
615 {
616 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.paStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
617 STAMUNIT_OCCURENCES, papszDesc[j], "/HM/CPU%d/Exit/Reason/%02x", i, j);
618 AssertRC(rc);
619 }
620 }
621 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitReasonNpf, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
622 "Nested page fault", "/HM/CPU%d/Exit/Reason/#NPF", i);
623 AssertRC(rc);
624 }
625 pVCpu->hm.s.paStatExitReasonR0 = MMHyperR3ToR0(pVM, pVCpu->hm.s.paStatExitReason);
626# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
627 Assert(pVCpu->hm.s.paStatExitReasonR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM));
628# else
629 Assert(pVCpu->hm.s.paStatExitReasonR0 != NIL_RTR0PTR);
630# endif
631
632 rc = MMHyperAlloc(pVM, sizeof(STAMCOUNTER) * 256, 8, MM_TAG_HM, (void **)&pVCpu->hm.s.paStatInjectedIrqs);
633 AssertRCReturn(rc, rc);
634 pVCpu->hm.s.paStatInjectedIrqsR0 = MMHyperR3ToR0(pVM, pVCpu->hm.s.paStatInjectedIrqs);
635# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
636 Assert(pVCpu->hm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM));
637# else
638 Assert(pVCpu->hm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR);
639# endif
640 for (unsigned j = 0; j < 256; j++)
641 {
642 STAMR3RegisterF(pVM, &pVCpu->hm.s.paStatInjectedIrqs[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
643 "Forwarded interrupts.",
644 (j < 0x20) ? "/HM/CPU%d/Interrupt/Trap/%02X" : "/HM/CPU%d/Interrupt/IRQ/%02X", i, j);
645 }
646
647 }
648#endif /* VBOX_WITH_STATISTICS */
649
650#ifdef VBOX_WITH_CRASHDUMP_MAGIC
651 /* Magic marker for searching in crash dumps. */
652 for (VMCPUID i = 0; i < pVM->cCpus; i++)
653 {
654 PVMCPU pVCpu = &pVM->aCpus[i];
655
656 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
657 strcpy((char *)pCache->aMagic, "VMCSCACHE Magic");
658 pCache->uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
659 }
660#endif
661 return VINF_SUCCESS;
662}
663
664
665/**
666 * Called when an init phase has completed.
667 *
668 * @returns VBox status code.
669 * @param pVM The VM.
670 * @param enmWhat The phase that completed.
671 */
672VMMR3_INT_DECL(int) HMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
673{
674 switch (enmWhat)
675 {
676 case VMINITCOMPLETED_RING3:
677 return hmR3InitCPU(pVM);
678 case VMINITCOMPLETED_RING0:
679 return hmR3InitFinalizeR0(pVM);
680 default:
681 return VINF_SUCCESS;
682 }
683}
684
685
686/**
687 * Turns off normal raw mode features.
688 *
689 * @param pVM Pointer to the VM.
690 */
691static void hmR3DisableRawMode(PVM pVM)
692{
693 /* Disable PATM & CSAM. */
694 PATMR3AllowPatching(pVM, false);
695 CSAMDisableScanning(pVM);
696
697 /* Turn off IDT/LDT/GDT and TSS monitoring and syncing. */
698 SELMR3DisableMonitoring(pVM);
699 TRPMR3DisableMonitoring(pVM);
700
701 /* Disable the switcher code (safety precaution). */
702 VMMR3DisableSwitcher(pVM);
703
704 /* Disable mapping of the hypervisor into the shadow page table. */
705 PGMR3MappingsDisable(pVM);
706
707 /* Disable the switcher */
708 VMMR3DisableSwitcher(pVM);
709
710 /* Reinit the paging mode to force the new shadow mode. */
711 for (VMCPUID i = 0; i < pVM->cCpus; i++)
712 {
713 PVMCPU pVCpu = &pVM->aCpus[i];
714
715 PGMR3ChangeMode(pVM, pVCpu, PGMMODE_REAL);
716 }
717}
718
719
720/**
721 * Initialize VT-x or AMD-V.
722 *
723 * @returns VBox status code.
724 * @param pVM Pointer to the VM.
725 */
726static int hmR3InitFinalizeR0(PVM pVM)
727{
728 int rc;
729
730 /*
731 * Hack to allow users to work around broken BIOSes that incorrectly set EFER.SVME, which makes us believe somebody else
732 * is already using AMD-V.
733 */
734 if ( !pVM->hm.s.vmx.fSupported
735 && !pVM->hm.s.svm.fSupported
736 && pVM->hm.s.lLastError == VERR_SVM_IN_USE /* implies functional AMD-V */
737 && RTEnvExist("VBOX_HWVIRTEX_IGNORE_SVM_IN_USE"))
738 {
739 LogRel(("HM: VBOX_HWVIRTEX_IGNORE_SVM_IN_USE active!\n"));
740 pVM->hm.s.svm.fSupported = true;
741 pVM->hm.s.svm.fIgnoreInUseError = true;
742 }
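/* In practice this workaround is enabled from the environment before the VM
   process starts, e.g. on a Unix host:

       export VBOX_HWVIRTEX_IGNORE_SVM_IN_USE=1

   RTEnvExist() only checks for the variable's presence, so any value works. */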
743 else
744 if ( !pVM->hm.s.vmx.fSupported
745 && !pVM->hm.s.svm.fSupported)
746 {
747 LogRel(("HM: No VT-x or AMD-V CPU extension found. Reason %Rrc\n", pVM->hm.s.lLastError));
748 LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.vmx.msr.feature_ctrl));
749
750 if (VMMIsHwVirtExtForced(pVM))
751 {
752 switch (pVM->hm.s.lLastError)
753 {
754 case VERR_VMX_NO_VMX:
755 return VM_SET_ERROR(pVM, VERR_VMX_NO_VMX, "VT-x is not available.");
756 case VERR_VMX_IN_VMX_ROOT_MODE:
757 return VM_SET_ERROR(pVM, VERR_VMX_IN_VMX_ROOT_MODE, "VT-x is being used by another hypervisor.");
758 case VERR_SVM_IN_USE:
759 return VM_SET_ERROR(pVM, VERR_SVM_IN_USE, "AMD-V is being used by another hypervisor.");
760 case VERR_SVM_NO_SVM:
761 return VM_SET_ERROR(pVM, VERR_SVM_NO_SVM, "AMD-V is not available.");
762 case VERR_SVM_DISABLED:
763 return VM_SET_ERROR(pVM, VERR_SVM_DISABLED, "AMD-V is disabled in the BIOS.");
764 default:
765 return pVM->hm.s.lLastError;
766 }
767 }
768 return VINF_SUCCESS;
769 }
770
771 if (pVM->hm.s.vmx.fSupported)
772 {
773 rc = SUPR3QueryVTxSupported();
774 if (RT_FAILURE(rc))
775 {
776#ifdef RT_OS_LINUX
777 LogRel(("HM: The host kernel does not support VT-x -- Linux 2.6.13 or newer required!\n"));
778#else
779 LogRel(("HM: The host kernel does not support VT-x!\n"));
780#endif
781 if ( pVM->cCpus > 1
782 || VMMIsHwVirtExtForced(pVM))
783 return rc;
784
785 /* silently fall back to raw mode */
786 return VINF_SUCCESS;
787 }
788 }
789
790 if (!pVM->hm.s.fAllowed)
791 return VINF_SUCCESS; /* nothing to do */
792
793 /* Enable VT-x or AMD-V on all host CPUs. */
794 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HM_ENABLE, 0, NULL);
795 if (RT_FAILURE(rc))
796 {
797 LogRel(("HMR3InitFinalize: SUPR3CallVMMR0Ex VMMR0_DO_HM_ENABLE failed with %Rrc\n", rc));
798 return rc;
799 }
800 Assert(!pVM->fHMEnabled || VMMIsHwVirtExtForced(pVM));
801
802 pVM->hm.s.fHasIoApic = PDMHasIoApic(pVM);
803 /* No TPR patching is required when the IO-APIC is not enabled for this VM. (Main should have taken care of this already) */
804 if (!pVM->hm.s.fHasIoApic)
805 {
806 Assert(!pVM->hm.s.fTRPPatchingAllowed); /* paranoia */
807 pVM->hm.s.fTRPPatchingAllowed = false;
808 }
809
810 bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
811 if (pVM->hm.s.vmx.fSupported)
812 {
813 Log(("pVM->hm.s.vmx.fSupported = %d\n", pVM->hm.s.vmx.fSupported));
814
815 if ( pVM->hm.s.fInitialized == false
816 && pVM->hm.s.vmx.msr.feature_ctrl != 0)
817 {
818 uint64_t val;
819 RTGCPHYS GCPhys = 0;
820
821 LogRel(("HM: Host CR4=%08X\n", pVM->hm.s.vmx.hostCR4));
822 LogRel(("HM: MSR_IA32_FEATURE_CONTROL = %RX64\n", pVM->hm.s.vmx.msr.feature_ctrl));
823 LogRel(("HM: MSR_IA32_VMX_BASIC_INFO = %RX64\n", pVM->hm.s.vmx.msr.vmx_basic_info));
824 LogRel(("HM: VMCS id = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info)));
825 LogRel(("HM: VMCS size = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.msr.vmx_basic_info)));
826 LogRel(("HM: VMCS physical address limit = %s\n", MSR_IA32_VMX_BASIC_INFO_VMCS_PHYS_WIDTH(pVM->hm.s.vmx.msr.vmx_basic_info) ? "< 4 GB" : "None"));
827 LogRel(("HM: VMCS memory type = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_MEM_TYPE(pVM->hm.s.vmx.msr.vmx_basic_info)));
828 LogRel(("HM: Dual monitor treatment = %d\n", MSR_IA32_VMX_BASIC_INFO_VMCS_DUAL_MON(pVM->hm.s.vmx.msr.vmx_basic_info)));
829
830 LogRel(("HM: MSR_IA32_VMX_PINBASED_CTLS = %RX64\n", pVM->hm.s.vmx.msr.vmx_pin_ctls.u));
831 val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1;
832 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT)
833 LogRel(("HM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT\n"));
834 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT)
835 LogRel(("HM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT\n"));
836 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI)
837 LogRel(("HM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI\n"));
838 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER)
839 LogRel(("HM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER\n"));
840 val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;
841 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT)
842 LogRel(("HM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT *must* be set\n"));
843 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT)
844 LogRel(("HM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT *must* be set\n"));
845 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI)
846 LogRel(("HM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI *must* be set\n"));
847 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER)
848 LogRel(("HM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER *must* be set\n"));
849
850 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS = %RX64\n", pVM->hm.s.vmx.msr.vmx_proc_ctls.u));
851 val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1;
852 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT)
853 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT\n"));
854 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET)
855 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET\n"));
856 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT)
857 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT\n"));
858 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT)
859 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT\n"));
860 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT)
861 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT\n"));
862 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT)
863 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT\n"));
864 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)
865 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT\n"));
866 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT)
867 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT\n"));
868 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT)
869 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT\n"));
870 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT)
871 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT\n"));
872 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT)
873 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT\n"));
874 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
875 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW\n"));
876 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT)
877 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT\n"));
878 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
879 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT\n"));
880 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT)
881 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT\n"));
882 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS)
883 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS\n"));
884 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
885 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG\n"));
886 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
887 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS\n"));
888 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT)
889 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT\n"));
890 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
891 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT\n"));
892 if (val & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
893 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL\n"));
894
895 val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;
896 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT)
897 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT *must* be set\n"));
898 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET)
899 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET *must* be set\n"));
900 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT)
901 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT *must* be set\n"));
902 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT)
903 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT *must* be set\n"));
904 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT)
905 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT *must* be set\n"));
906 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT)
907 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT *must* be set\n"));
908 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)
909 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT *must* be set\n"));
910 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT)
911 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT *must* be set\n"));
912 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT)
913 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT *must* be set\n"));
914 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT)
915 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT *must* be set\n"));
916 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT)
917 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT *must* be set\n"));
918 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
919 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW *must* be set\n"));
920 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT)
921 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT *must* be set\n"));
922 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
923 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT *must* be set\n"));
924 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT)
925 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT *must* be set\n"));
926 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS)
927 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS *must* be set\n"));
928 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
929 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG *must* be set\n"));
930 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
931 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS *must* be set\n"));
932 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT)
933 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT *must* be set\n"));
934 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
935 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT *must* be set\n"));
936 if (val & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
937 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL *must* be set\n"));
938
939 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
940 {
941 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS2 = %RX64\n", pVM->hm.s.vmx.msr.vmx_proc_ctls2.u));
942 val = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;
943 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
944 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC\n"));
945 if (val & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
946 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_EPT\n"));
947 if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT)
948 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT\n"));
949 if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
950 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP\n"));
951 if (val & VMX_VMCS_CTRL_PROC_EXEC2_X2APIC)
952 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_X2APIC\n"));
953 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
954 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_VPID\n"));
955 if (val & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
956 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT\n"));
957 if (val & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE)
958 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE\n"));
959 if (val & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT)
960 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT\n"));
961
962 val = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0;
963 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
964 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC *must* be set\n"));
965 if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT)
966 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT *must* be set\n"));
967 if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
968 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP *must* be set\n"));
969 if (val & VMX_VMCS_CTRL_PROC_EXEC2_X2APIC)
970 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_X2APIC *must* be set\n"));
971 if (val & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
972 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_EPT *must* be set\n"));
973 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
974 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_VPID *must* be set\n"));
975 if (val & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
976 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT *must* be set\n"));
977 if (val & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE)
978 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE *must* be set\n"));
979 if (val & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT)
980 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT *must* be set\n"));
981 }
982
983 LogRel(("HM: MSR_IA32_VMX_ENTRY_CTLS = %RX64\n", pVM->hm.s.vmx.msr.vmx_entry.u));
984 val = pVM->hm.s.vmx.msr.vmx_entry.n.allowed1;
985 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
986 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG\n"));
987 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST)
988 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST\n"));
989 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM)
990 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM\n"));
991 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON)
992 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON\n"));
993 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR)
994 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR\n"));
995 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR)
996 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR\n"));
997 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR)
998 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR\n"));
999 val = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0;
1000 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
1001 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG *must* be set\n"));
1002 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST)
1003 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST *must* be set\n"));
1004 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM)
1005 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM *must* be set\n"));
1006 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON)
1007 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON *must* be set\n"));
1008 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR)
1009 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR *must* be set\n"));
1010 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR)
1011 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR *must* be set\n"));
1012 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR)
1013 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR *must* be set\n"));
1014
1015 LogRel(("HM: MSR_IA32_VMX_EXIT_CTLS = %RX64\n", pVM->hm.s.vmx.msr.vmx_exit.u));
1016 val = pVM->hm.s.vmx.msr.vmx_exit.n.allowed1;
1017 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG)
1018 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG\n"));
1019 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE)
1020 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE\n"));
1021 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ)
1022 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ\n"));
1023 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR)
1024 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR\n"));
1025 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR)
1026 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR\n"));
1027 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR)
1028 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR\n"));
1029 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR)
1030 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR\n"));
1031 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
1032 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER\n"));
1033 val = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0;
1034 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG)
1035 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG *must* be set\n"));
1036 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE)
1037 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE *must* be set\n"));
1038 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ)
1039 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ *must* be set\n"));
1040 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR)
1041 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR *must* be set\n"));
1042 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR)
1043 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR *must* be set\n"));
1044 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR)
1045 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR *must* be set\n"));
1046 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR)
1047 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR *must* be set\n"));
1048 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
1049 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER *must* be set\n"));
1050
1051 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps)
1052 {
1053 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP = %RX64\n", pVM->hm.s.vmx.msr.vmx_ept_vpid_caps));
1054
1055 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_RWX_X_ONLY)
1056 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_RWX_X_ONLY\n"));
1057 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_RWX_W_ONLY)
1058 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_RWX_W_ONLY\n"));
1059 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_RWX_WX_ONLY)
1060 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_RWX_WX_ONLY\n"));
1061 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_GAW_21_BITS)
1062 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_GAW_21_BITS\n"));
1063 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_GAW_30_BITS)
1064 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_GAW_30_BITS\n"));
1065 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_GAW_39_BITS)
1066 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_GAW_39_BITS\n"));
1067 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_GAW_48_BITS)
1068 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_GAW_48_BITS\n"));
1069 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_GAW_57_BITS)
1070 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_GAW_57_BITS\n"));
1071 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_UC)
1072 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_EMT_UC\n"));
1073 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WC)
1074 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_EMT_WC\n"));
1075 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WT)
1076 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_EMT_WT\n"));
1077 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WP)
1078 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_EMT_WP\n"));
1079 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB)
1080 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB\n"));
1081 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_SP_21_BITS)
1082 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_SP_21_BITS\n"));
1083 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_SP_30_BITS)
1084 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_SP_30_BITS\n"));
1085 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_SP_39_BITS)
1086 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_SP_39_BITS\n"));
1087 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_SP_48_BITS)
1088 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_SP_48_BITS\n"));
1089 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
1090 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_INVEPT\n"));
1091 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
1092 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT\n"));
1093 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
1094 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS\n"));
1095 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
1096 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_INVVPID\n"));
1097 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1098 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR\n"));
1099 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
1100 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT\n"));
1101 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
1102 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS\n"));
1103 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
1104 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS\n"));
1105 }
1106
1107 LogRel(("HM: MSR_IA32_VMX_MISC = %RX64\n", pVM->hm.s.vmx.msr.vmx_misc));
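 /* Bits 4:0 of MSR_IA32_VMX_MISC give the VMX-preemption timer rate: the timer counts down by
    one every time the TSC bit at that position changes (Intel SDM); hence the "shift" below. */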
1108 if (MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hm.s.vmx.msr.vmx_misc) == pVM->hm.s.vmx.cPreemptTimerShift)
1109 LogRel(("HM: MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hm.s.vmx.msr.vmx_misc)));
1110 else
1111 {
1112 LogRel(("HM: MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x - erratum detected, using %x instead\n",
1113 MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hm.s.vmx.msr.vmx_misc), pVM->hm.s.vmx.cPreemptTimerShift));
1114 }
1115 LogRel(("HM: MSR_IA32_VMX_MISC_ACTIVITY_STATES %x\n", MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hm.s.vmx.msr.vmx_misc)));
1116 LogRel(("HM: MSR_IA32_VMX_MISC_CR3_TARGET %x\n", MSR_IA32_VMX_MISC_CR3_TARGET(pVM->hm.s.vmx.msr.vmx_misc)));
1117 LogRel(("HM: MSR_IA32_VMX_MISC_MAX_MSR %x\n", MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)));
1118 LogRel(("HM: MSR_IA32_VMX_MISC_MSEG_ID %x\n", MSR_IA32_VMX_MISC_MSEG_ID(pVM->hm.s.vmx.msr.vmx_misc)));
1119
1120 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED0 = %RX64\n", pVM->hm.s.vmx.msr.vmx_cr0_fixed0));
1121 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED1 = %RX64\n", pVM->hm.s.vmx.msr.vmx_cr0_fixed1));
1122 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED0 = %RX64\n", pVM->hm.s.vmx.msr.vmx_cr4_fixed0));
1123 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED1 = %RX64\n", pVM->hm.s.vmx.msr.vmx_cr4_fixed1));
1124 LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM = %RX64\n", pVM->hm.s.vmx.msr.vmx_vmcs_enum));
1125
1126 LogRel(("HM: APIC-access page physaddr = %RHp\n", pVM->hm.s.vmx.HCPhysApicAccess));
1127
1128 /* Paranoia */
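 /* MSR_IA32_VMX_MISC_MAX_MSR encodes the recommended maximum number of MSRs in the VM-entry/
    VM-exit MSR-load/store lists as 512 * (N + 1), so 512 is the architectural minimum. */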
1129 AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc) >= 512);
1130
1131 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1132 {
1133 LogRel(("HM: VCPU%d: MSR bitmap physaddr = %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMsrBitmap));
1134 LogRel(("HM: VCPU%d: VMCS physaddr = %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysVMCS));
1135 }
1136
1137 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
1138 pVM->hm.s.fNestedPaging = pVM->hm.s.fAllowNestedPaging;
1139
1140 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
1141 pVM->hm.s.vmx.fVpid = pVM->hm.s.vmx.fAllowVpid;
1142
1143 /*
1144 * Disallow RDTSCP in the guest if there are no secondary processor-based VM-execution controls, as otherwise
1145 * RDTSCP would cause a #UD. There might be no CPUs out there where this happens, as RDTSCP was introduced
1146 * with Nehalem and secondary VM-execution controls should be supported on all of them, but nonetheless it's Intel...
1147 */
1148 if (!(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1149 && CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP))
1150 {
1151 CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP);
1152 }
1153
1154 /* Unrestricted guest execution relies on EPT. */
1155 if ( pVM->hm.s.fNestedPaging
1156 && (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE))
1157 {
1158 pVM->hm.s.vmx.fUnrestrictedGuest = true;
1159 }
1160
1161 /* Only try once. */
1162 pVM->hm.s.fInitialized = true;
1163
1164 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
1165 {
1166 /* Allocate three pages for the TSS we need for real mode emulation. (2 pages for the IO bitmap) */
1167 rc = PDMR3VMMDevHeapAlloc(pVM, HM_VTX_TOTAL_DEVHEAP_MEM, (RTR3PTR *)&pVM->hm.s.vmx.pRealModeTSS);
1168 if (RT_SUCCESS(rc))
1169 {
1170 /* The I/O bitmap starts right after the virtual interrupt redirection bitmap. */
1171 /* See Intel spec. 20.3.3 "Software Interrupt Handling in Virtual-8086 mode" esp. Figure 20-5. */
1172 ASMMemZero32(pVM->hm.s.vmx.pRealModeTSS, sizeof(*pVM->hm.s.vmx.pRealModeTSS));
1173 pVM->hm.s.vmx.pRealModeTSS->offIoBitmap = sizeof(*pVM->hm.s.vmx.pRealModeTSS);
1174 /* A bit set to 0 means the software interrupt is redirected to the 8086 program interrupt handler rather
1175 than switching to the protected-mode handler. */
1176 memset(pVM->hm.s.vmx.pRealModeTSS->IntRedirBitmap, 0, sizeof(pVM->hm.s.vmx.pRealModeTSS->IntRedirBitmap));
1177 /* Allow all port I/O, so that port I/O instructions do not cause exceptions but instead
1178 cause a VM-exit (based on VT-x's I/O bitmap, which we currently configure to always cause an exit). */
1179 memset(pVM->hm.s.vmx.pRealModeTSS + 1, 0, PAGE_SIZE * 2);
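 /* The CPU may read two bitmap bytes for a single port access, so the I/O permission bitmap
    must be followed by a terminating byte with all bits set; that is the 0xff written below. */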
1180 *((unsigned char *)pVM->hm.s.vmx.pRealModeTSS + HM_VTX_TSS_SIZE - 2) = 0xff;
1181
1182 /*
1183 * Construct a 1024 element page directory with 4 MB pages for the identity mapped page table used in
1184 * real and protected mode without paging with EPT.
1185 */
1186 pVM->hm.s.vmx.pNonPagingModeEPTPageTable = (PX86PD)((char *)pVM->hm.s.vmx.pRealModeTSS + PAGE_SIZE * 3);
1187 for (unsigned i = 0; i < X86_PG_ENTRIES; i++)
1188 {
1189 pVM->hm.s.vmx.pNonPagingModeEPTPageTable->a[i].u = _4M * i;
1190 pVM->hm.s.vmx.pNonPagingModeEPTPageTable->a[i].u |= X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US
1191 | X86_PDE4M_A | X86_PDE4M_D | X86_PDE4M_PS
1192 | X86_PDE4M_G;
1193 }
1194
1195 /* We convert it here every time as PCI regions could be reconfigured. */
1196 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
1197 AssertRC(rc);
1198 LogRel(("HM: Real Mode TSS guest physaddr = %RGp\n", GCPhys));
1199
1200 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
1201 AssertRC(rc);
1202 LogRel(("HM: Non-Paging Mode EPT CR3 = %RGp\n", GCPhys));
1203 }
1204 else
1205 {
1206 LogRel(("HM: No real mode VT-x support (PDMR3VMMDevHeapAlloc returned %Rrc)\n", rc));
1207 pVM->hm.s.vmx.pRealModeTSS = NULL;
1208 pVM->hm.s.vmx.pNonPagingModeEPTPageTable = NULL;
1209 }
1210 }
1211
1212 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HM_SETUP_VM, 0, NULL);
1213 AssertRC(rc);
1214 if (rc == VINF_SUCCESS)
1215 {
1216 pVM->fHMEnabled = true;
1217 pVM->hm.s.vmx.fEnabled = true;
1218 hmR3DisableRawMode(pVM);
1219
1220 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
1221#ifdef VBOX_ENABLE_64_BITS_GUESTS
1222 if (pVM->hm.s.fAllow64BitGuests)
1223 {
1224 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
1225 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
1226 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL); /* 64 bits only on Intel CPUs */
1227 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
1228 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1229 }
1230 else
1231 /* Turn on NXE if PAE has been enabled *and* the host has turned on NXE (we reuse the host EFER in the switcher) */
1232 /** @todo This needs to be fixed properly! */
1233 if ( CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE)
1234 && (pVM->hm.s.vmx.hostEFER & MSR_K6_EFER_NXE))
1235 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1236
1237 LogRel((pVM->hm.s.fAllow64BitGuests
1238 ? "HM: 32-bit and 64-bit guests supported.\n"
1239 : "HM: 32-bit guests supported.\n"));
1240#else
1241 LogRel(("HM: 32-bit guests supported.\n"));
1242#endif
1243 LogRel(("HM: VMX enabled!\n"));
1244 if (pVM->hm.s.fNestedPaging)
1245 {
1246 LogRel(("HM: Enabled nested paging\n"));
1247 LogRel(("HM: EPT root page = %RHp\n", PGMGetHyperCR3(VMMGetCpu(pVM))));
1248 if (pVM->hm.s.vmx.enmFlushEpt == VMX_FLUSH_EPT_SINGLE_CONTEXT)
1249 LogRel(("HM: enmFlushEpt = VMX_FLUSH_EPT_SINGLE_CONTEXT\n"));
1250 else if (pVM->hm.s.vmx.enmFlushEpt == VMX_FLUSH_EPT_ALL_CONTEXTS)
1251 LogRel(("HM: enmFlushEpt = VMX_FLUSH_EPT_ALL_CONTEXTS\n"));
1252 else if (pVM->hm.s.vmx.enmFlushEpt == VMX_FLUSH_EPT_NOT_SUPPORTED)
1253 LogRel(("HM: enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED\n"));
1254 else
1255 LogRel(("HM: enmFlushEpt = %d\n", pVM->hm.s.vmx.enmFlushEpt));
1256
1257 if (pVM->hm.s.vmx.fUnrestrictedGuest)
1258 LogRel(("HM: Unrestricted guest execution enabled!\n"));
1259
1260#if HC_ARCH_BITS == 64
1261 if (pVM->hm.s.fLargePages)
1262 {
1263 /* Use large (2 MB) pages for our EPT PDEs where possible. */
1264 PGMSetLargePageUsage(pVM, true);
1265 LogRel(("HM: Large page support enabled!\n"));
1266 }
1267#endif
1268 }
1269 else
1270 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest);
1271
1272 if (pVM->hm.s.vmx.fVpid)
1273 {
1274 LogRel(("HM: Enabled VPID\n"));
1275 if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_INDIV_ADDR)
1276 LogRel(("HM: enmFlushVpid = VMX_FLUSH_VPID_INDIV_ADDR\n"));
1277 else if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_SINGLE_CONTEXT)
1278 LogRel(("HM: enmFlushVpid = VMX_FLUSH_VPID_SINGLE_CONTEXT\n"));
1279 else if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_ALL_CONTEXTS)
1280 LogRel(("HM: enmFlushVpid = VMX_FLUSH_VPID_ALL_CONTEXTS\n"));
1281 else if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
1282 LogRel(("HM: enmFlushVpid = VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS\n"));
1283 else
1284 LogRel(("HM: enmFlushVpid = %d\n", pVM->hm.s.vmx.enmFlushVpid));
1285 }
1286 else if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_NOT_SUPPORTED)
1287 LogRel(("HM: Ignoring VPID capabilities of CPU.\n"));
1288
1289 /* TPR patching status logging. */
1290 if (pVM->hm.s.fTRPPatchingAllowed)
1291 {
1292 if ( (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1293 && (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
1294 {
1295 pVM->hm.s.fTRPPatchingAllowed = false; /* not necessary as we have a hardware solution. */
1296 LogRel(("HM: TPR Patching not required (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC).\n"));
1297 }
1298 else
1299 {
1300 uint32_t u32Eax, u32Dummy;
1301
1302 /* TPR patching needs access to the MSR_K8_LSTAR msr. */
1303 ASMCpuId(0x80000000, &u32Eax, &u32Dummy, &u32Dummy, &u32Dummy);
1304 if ( u32Eax < 0x80000001
1305 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
1306 {
1307 pVM->hm.s.fTRPPatchingAllowed = false;
1308 LogRel(("HM: TPR patching disabled (long mode not supported).\n"));
1309 }
1310 }
1311 }
1312 LogRel(("HM: TPR Patching %s.\n", (pVM->hm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));
1313
1314 /*
1315 * Check for preemption timer config override and log the state of it.
1316 */
1317 if (pVM->hm.s.vmx.fUsePreemptTimer)
1318 {
1319 PCFGMNODE pCfgHm = CFGMR3GetChild(CFGMR3GetRoot(pVM), "HM");
1320 int rc2 = CFGMR3QueryBoolDef(pCfgHm, "UsePreemptTimer", &pVM->hm.s.vmx.fUsePreemptTimer, true);
1321 AssertLogRelRC(rc2);
1322 }
1323 if (pVM->hm.s.vmx.fUsePreemptTimer)
1324 LogRel(("HM: Using the VMX-preemption timer (cPreemptTimerShift=%u)\n", pVM->hm.s.vmx.cPreemptTimerShift));
1325 }
1326 else
1327 {
1328 LogRel(("HM: VMX setup failed with rc=%Rrc!\n", rc));
1329 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1330 LogRel(("HM: CPU[%ld] Last instruction error %x\n", i, pVM->aCpus[0].hm.s.vmx.lasterror.ulInstrError));
1331 pVM->fHMEnabled = false;
1332 }
1333 }
1334 }
1335 else
1336 if (pVM->hm.s.svm.fSupported)
1337 {
1338 Log(("pVM->hm.s.svm.fSupported = %d\n", pVM->hm.s.svm.fSupported));
1339
1340 if (!pVM->hm.s.fInitialized)
1341 {
1342 /* Erratum 170 which requires a forced TLB flush for each world switch:
1343 * See http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/33610.pdf
1344 *
1345 * All BH-G1/2 and DH-G1/2 models include a fix:
1346 * Athlon X2: 0x6b 1/2
1347 * 0x68 1/2
1348 * Athlon 64: 0x7f 1
1349 * 0x6f 2
1350 * Sempron: 0x7f 1/2
1351 * 0x6f 2
1352 * 0x6c 2
1353 * 0x7c 2
1354 * Turion 64: 0x68 2
1355 *
1356 */
1357 uint32_t u32Dummy;
1358 uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
1359 ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
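 /* CPUID.1 EAX layout: stepping [3:0], model [7:4], family [11:8], extended model [19:16],
    extended family [27:20]; the extended fields are only folded in when the base family is 0xf. */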
1360 u32BaseFamily = (u32Version >> 8) & 0xf;
1361 u32Family = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
1362 u32Model = ((u32Version >> 4) & 0xf);
1363 u32Model = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
1364 u32Stepping = u32Version & 0xf;
1365 if ( u32Family == 0xf
1366 && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
1367 && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
1368 {
1369 LogRel(("HM: AMD cpu with erratum 170 family %x model %x stepping %x\n", u32Family, u32Model, u32Stepping));
1370 }
1371
1372 LogRel(("HM: cpuid 0x80000001.u32AMDFeatureECX = %RX32\n", pVM->hm.s.cpuid.u32AMDFeatureECX));
1373 LogRel(("HM: cpuid 0x80000001.u32AMDFeatureEDX = %RX32\n", pVM->hm.s.cpuid.u32AMDFeatureEDX));
1374 LogRel(("HM: AMD HWCR MSR = %RX64\n", pVM->hm.s.svm.msrHwcr));
1375 LogRel(("HM: AMD-V revision = %X\n", pVM->hm.s.svm.u32Rev));
1376 LogRel(("HM: AMD-V max ASID = %d\n", pVM->hm.s.uMaxAsid));
1377 LogRel(("HM: AMD-V features = %X\n", pVM->hm.s.svm.u32Features));
1378 static const struct { uint32_t fFlag; const char *pszName; } s_aSvmFeatures[] =
1379 {
1380#define FLAG_NAME(a_Define) { a_Define, #a_Define }
1381 FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
1382 FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_LBR_VIRT),
1383 FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_SVM_LOCK),
1384 FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE),
1385 FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_TSC_RATE_MSR),
1386 FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN),
1387 FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID),
1388 FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST),
1389 FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_SSE_3_5_DISABLE),
1390 FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER),
1392#undef FLAG_NAME
1393 };
1394 uint32_t fSvmFeatures = pVM->hm.s.svm.u32Features;
1395 for (unsigned i = 0; i < RT_ELEMENTS(s_aSvmFeatures); i++)
1396 if (fSvmFeatures & s_aSvmFeatures[i].fFlag)
1397 {
1398 LogRel(("HM: %s\n", s_aSvmFeatures[i].pszName));
1399 fSvmFeatures &= ~s_aSvmFeatures[i].fFlag;
1400 }
1401 if (fSvmFeatures)
1402 for (unsigned iBit = 0; iBit < 32; iBit++)
1403 if (RT_BIT_32(iBit) & fSvmFeatures)
1404 LogRel(("HM: Reserved bit %u\n", iBit));
1405
1406 /* Only try once. */
1407 pVM->hm.s.fInitialized = true;
1408
1409 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
1410 pVM->hm.s.fNestedPaging = pVM->hm.s.fAllowNestedPaging;
1411
1412 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HM_SETUP_VM, 0, NULL);
1413 AssertRC(rc);
1414 if (rc == VINF_SUCCESS)
1415 {
1416 pVM->fHMEnabled = true;
1417 pVM->hm.s.svm.fEnabled = true;
1418
1419 if (pVM->hm.s.fNestedPaging)
1420 {
1421 LogRel(("HM: Enabled nested paging\n"));
1422#if HC_ARCH_BITS == 64
1423 if (pVM->hm.s.fLargePages)
1424 {
1425 /* Use large (2 MB) pages for our nested paging PDEs where possible. */
1426 PGMSetLargePageUsage(pVM, true);
1427 LogRel(("HM: Large page support enabled!\n"));
1428 }
1429#endif
1430 }
1431
1432 hmR3DisableRawMode(pVM);
1433 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
1434 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);
1435#ifdef VBOX_ENABLE_64_BITS_GUESTS
1436 if (pVM->hm.s.fAllow64BitGuests)
1437 {
1438 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
1439 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
1440 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1441 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
1442 }
1443 else
1444 /* Turn on NXE if PAE has been enabled. */
1445 if (CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
1446 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1447#endif
1448
1449 LogRel((pVM->hm.s.fAllow64BitGuests
1450 ? "HM: 32-bit and 64-bit guest supported.\n"
1451 : "HM: 32-bit guest supported.\n"));
1452
1453 LogRel(("HM: TPR Patching %s.\n", (pVM->hm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));
1454 }
1455 else
1456 {
1457 pVM->fHMEnabled = false;
1458 }
1459 }
1460 }
1461 if (pVM->fHMEnabled)
1462 LogRel(("HM: VT-x/AMD-V init method: %s\n", (pVM->hm.s.fGlobalInit) ? "GLOBAL" : "LOCAL"));
1463 RTLogRelSetBuffering(fOldBuffered);
1464 return VINF_SUCCESS;
1465}
1466
1467
1468/**
1469 * Applies relocations to data and code managed by this
1470 * component. This function will be called at init and
1471 * whenever the VMM needs to relocate itself inside the GC.
1472 *
1473 * @param pVM The VM.
1474 */
1475VMMR3DECL(void) HMR3Relocate(PVM pVM)
1476{
1477 Log(("HMR3Relocate to %RGv\n", MMHyperGetArea(pVM, 0)));
1478
1479 /* Fetch the current paging mode during the relocate callback during state loading. */
1480 if (VMR3GetState(pVM) == VMSTATE_LOADING)
1481 {
1482 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1483 {
1484 PVMCPU pVCpu = &pVM->aCpus[i];
1485
1486 pVCpu->hm.s.enmShadowMode = PGMGetShadowMode(pVCpu);
1487 Assert(pVCpu->hm.s.vmx.enmCurrGuestMode == PGMGetGuestMode(pVCpu));
1488 pVCpu->hm.s.vmx.enmCurrGuestMode = PGMGetGuestMode(pVCpu);
1489 }
1490 }
1491#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1492 if (pVM->fHMEnabled)
1493 {
1494 int rc;
1495 switch (PGMGetHostMode(pVM))
1496 {
1497 case PGMMODE_32_BIT:
1498 pVM->hm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_32_TO_AMD64);
1499 break;
1500
1501 case PGMMODE_PAE:
1502 case PGMMODE_PAE_NX:
1503 pVM->hm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_PAE_TO_AMD64);
1504 break;
1505
1506 default:
1507 AssertFailed();
1508 break;
1509 }
1510 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "VMXGCStartVM64", &pVM->hm.s.pfnVMXGCStartVM64);
1511 AssertReleaseMsgRC(rc, ("VMXGCStartVM64 -> rc=%Rrc\n", rc));
1512
1513 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "SVMGCVMRun64", &pVM->hm.s.pfnSVMGCVMRun64);
1514 AssertReleaseMsgRC(rc, ("SVMGCVMRun64 -> rc=%Rrc\n", rc));
1515
1516 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HMSaveGuestFPU64", &pVM->hm.s.pfnSaveGuestFPU64);
1517 AssertReleaseMsgRC(rc, ("HMSetupFPU64 -> rc=%Rrc\n", rc));
1518
1519 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HMSaveGuestDebug64", &pVM->hm.s.pfnSaveGuestDebug64);
1520 AssertReleaseMsgRC(rc, ("HMSetupDebug64 -> rc=%Rrc\n", rc));
1521
1522# ifdef DEBUG
1523 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HMTestSwitcher64", &pVM->hm.s.pfnTest64);
1524 AssertReleaseMsgRC(rc, ("HMTestSwitcher64 -> rc=%Rrc\n", rc));
1525# endif
1526 }
1527#endif
1528 return;
1529}
1530
1531
1532/**
1533 * Checks if hardware accelerated raw mode is allowed.
1534 *
1535 * @returns true if hardware acceleration is allowed, otherwise false.
1536 * @param pVM Pointer to the VM.
1537 */
1538VMMR3DECL(bool) HMR3IsAllowed(PVM pVM)
1539{
1540 return pVM->hm.s.fAllowed;
1541}
1542
1543
1544/**
1545 * Notification callback which is called whenever there is a chance that a CR3
1546 * value might have changed.
1547 *
1548 * This is called by PGM.
1549 *
1550 * @param pVM Pointer to the VM.
1551 * @param pVCpu Pointer to the VMCPU.
1552 * @param enmShadowMode New shadow paging mode.
1553 * @param enmGuestMode New guest paging mode.
1554 */
1555VMMR3DECL(void) HMR3PagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode)
1556{
1557 /* Ignore page mode changes during state loading. */
1558 if (VMR3GetState(pVCpu->pVMR3) == VMSTATE_LOADING)
1559 return;
1560
1561 pVCpu->hm.s.enmShadowMode = enmShadowMode;
1562
1563 if ( pVM->hm.s.vmx.fEnabled
1564 && pVM->fHMEnabled)
1565 {
1566 if ( pVCpu->hm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
1567 && enmGuestMode >= PGMMODE_PROTECTED)
1568 {
1569 PCPUMCTX pCtx;
1570
1571 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1572
1573 /* After a real mode switch to protected mode we must force
1574 CPL to 0. Our real mode emulation had to set it to 3. */
1575 pCtx->ss.Attr.n.u2Dpl = 0;
1576 }
1577 }
1578
1579 if (pVCpu->hm.s.vmx.enmCurrGuestMode != enmGuestMode)
1580 {
1581 /* Keep track of paging mode changes. */
1582 pVCpu->hm.s.vmx.enmPrevGuestMode = pVCpu->hm.s.vmx.enmCurrGuestMode;
1583 pVCpu->hm.s.vmx.enmCurrGuestMode = enmGuestMode;
1584
1585 /* Did we miss a change, because all code was executed in the recompiler? */
1586 if (pVCpu->hm.s.vmx.enmLastSeenGuestMode == enmGuestMode)
1587 {
1588 Log(("HMR3PagingModeChanged missed %s->%s transition (prev %s)\n", PGMGetModeName(pVCpu->hm.s.vmx.enmPrevGuestMode),
1589 PGMGetModeName(pVCpu->hm.s.vmx.enmCurrGuestMode), PGMGetModeName(pVCpu->hm.s.vmx.enmLastSeenGuestMode)));
1590 pVCpu->hm.s.vmx.enmLastSeenGuestMode = pVCpu->hm.s.vmx.enmPrevGuestMode;
1591 }
1592 }
1593
1594 /* Reset the contents of the read cache. */
1595 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
1596 for (unsigned j = 0; j < pCache->Read.cValidEntries; j++)
1597 pCache->Read.aFieldVal[j] = 0;
1598}
1599
1600
1601/**
1602 * Terminates the HM.
1603 *
1604 * Termination means cleaning up and freeing all resources;
1605 * the VM itself is, at this point, powered off or suspended.
1606 *
1607 * @returns VBox status code.
1608 * @param pVM Pointer to the VM.
1609 */
1610VMMR3DECL(int) HMR3Term(PVM pVM)
1611{
1612 if (pVM->hm.s.vmx.pRealModeTSS)
1613 {
1614 PDMR3VMMDevHeapFree(pVM, pVM->hm.s.vmx.pRealModeTSS);
1615 pVM->hm.s.vmx.pRealModeTSS = NULL;
1616 }
1617 hmR3TermCPU(pVM);
1618 return VINF_SUCCESS;
1619}
1620
1621
1622/**
1623 * Terminates the per-VCPU HM.
1624 *
1625 * @returns VBox status code.
1626 * @param pVM Pointer to the VM.
1627 */
1628static int hmR3TermCPU(PVM pVM)
1629{
1630 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1631 {
1632 PVMCPU pVCpu = &pVM->aCpus[i]; NOREF(pVCpu);
1633
1634#ifdef VBOX_WITH_STATISTICS
1635 if (pVCpu->hm.s.paStatExitReason)
1636 {
1637 MMHyperFree(pVM, pVCpu->hm.s.paStatExitReason);
1638 pVCpu->hm.s.paStatExitReason = NULL;
1639 pVCpu->hm.s.paStatExitReasonR0 = NIL_RTR0PTR;
1640 }
1641 if (pVCpu->hm.s.paStatInjectedIrqs)
1642 {
1643 MMHyperFree(pVM, pVCpu->hm.s.paStatInjectedIrqs);
1644 pVCpu->hm.s.paStatInjectedIrqs = NULL;
1645 pVCpu->hm.s.paStatInjectedIrqsR0 = NIL_RTR0PTR;
1646 }
1647#endif
1648
1649#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1650 memset(pVCpu->hm.s.vmx.VMCSCache.aMagic, 0, sizeof(pVCpu->hm.s.vmx.VMCSCache.aMagic));
1651 pVCpu->hm.s.vmx.VMCSCache.uMagic = 0;
1652 pVCpu->hm.s.vmx.VMCSCache.uPos = 0xffffffff;
1653#endif
1654 }
1655 return VINF_SUCCESS;
1656}
1657
1658
1659/**
1660 * Resets a virtual CPU.
1661 *
1662 * Used by HMR3Reset and CPU hot plugging.
1663 *
1664 * @param pVCpu The CPU to reset.
1665 */
1666VMMR3DECL(void) HMR3ResetCpu(PVMCPU pVCpu)
1667{
1668 /* On first entry we'll sync everything. */
1669 pVCpu->hm.s.fContextUseFlags = HM_CHANGED_ALL;
1670
1671 pVCpu->hm.s.vmx.cr0_mask = 0;
1672 pVCpu->hm.s.vmx.cr4_mask = 0;
1673
1674 pVCpu->hm.s.fActive = false;
1675 pVCpu->hm.s.Event.fPending = false;
1676
1677 /* Reset state information for real-mode emulation in VT-x. */
1678 pVCpu->hm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL;
1679 pVCpu->hm.s.vmx.enmPrevGuestMode = PGMMODE_REAL;
1680 pVCpu->hm.s.vmx.enmCurrGuestMode = PGMMODE_REAL;
1681
1682 /* Reset the contents of the read cache. */
1683 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
1684 for (unsigned j = 0; j < pCache->Read.cValidEntries; j++)
1685 pCache->Read.aFieldVal[j] = 0;
1686
1687#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1688 /* Magic marker for searching in crash dumps. */
1689 strcpy((char *)pCache->aMagic, "VMCSCACHE Magic");
1690 pCache->uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
1691#endif
1692}
1693
1694
1695/**
1696 * The VM is being reset.
1697 *
1698 * For the HM component this means that any GDT/LDT/TSS monitors
1699 * need to be removed.
1700 *
1701 * @param pVM Pointer to the VM.
1702 */
1703VMMR3DECL(void) HMR3Reset(PVM pVM)
1704{
1705 LogFlow(("HMR3Reset:\n"));
1706
1707 if (pVM->fHMEnabled)
1708 hmR3DisableRawMode(pVM);
1709
1710 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1711 {
1712 PVMCPU pVCpu = &pVM->aCpus[i];
1713
1714 HMR3ResetCpu(pVCpu);
1715 }
1716
1717 /* Clear all patch information. */
1718 pVM->hm.s.pGuestPatchMem = 0;
1719 pVM->hm.s.pFreeGuestPatchMem = 0;
1720 pVM->hm.s.cbGuestPatchMem = 0;
1721 pVM->hm.s.cPatches = 0;
1722 pVM->hm.s.PatchTree = 0;
1723 pVM->hm.s.fTPRPatchingActive = false;
1724 ASMMemZero32(pVM->hm.s.aPatches, sizeof(pVM->hm.s.aPatches));
1725}
1726
1727
1728/**
1729 * Callback to remove all TPR patches and restore the original instructions.
1730 *
1731 * @returns VBox strict status code.
1732 * @param pVM Pointer to the VM.
1733 * @param pVCpu The VMCPU for the EMT we're being called on.
1734 * @param pvUser The VCPU id (as a uintptr_t) the patch request was issued on.
1735 */
1736DECLCALLBACK(VBOXSTRICTRC) hmR3RemovePatches(PVM pVM, PVMCPU pVCpu, void *pvUser)
1737{
1738 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
1739
1740 /* Only execute the handler on the VCPU the original patch request was issued on. */
1741 if (pVCpu->idCpu != idCpu)
1742 return VINF_SUCCESS;
1743
1744 Log(("hmR3RemovePatches\n"));
1745 for (unsigned i = 0; i < pVM->hm.s.cPatches; i++)
1746 {
1747 uint8_t abInstr[15];
1748 PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
1749 RTGCPTR pInstrGC = (RTGCPTR)pPatch->Core.Key;
1750 int rc;
1751
1752#ifdef LOG_ENABLED
1753 char szOutput[256];
1754
1755 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
1756 szOutput, sizeof(szOutput), NULL);
1757 if (RT_SUCCESS(rc))
1758 Log(("Patched instr: %s\n", szOutput));
1759#endif
1760
1761 /* Check if the instruction is still the same. */
1762 rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pInstrGC, pPatch->cbNewOp);
1763 if (rc != VINF_SUCCESS)
1764 {
1765 Log(("Patched code removed? (rc=%Rrc0\n", rc));
1766 continue; /* swapped out or otherwise removed; skip it. */
1767 }
1768
1769 if (memcmp(abInstr, pPatch->aNewOpcode, pPatch->cbNewOp))
1770 {
1771 Log(("Patched instruction was changed! (rc=%Rrc0\n", rc));
1772 continue; /* skip it. */
1773 }
1774
1775 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pInstrGC, pPatch->aOpcode, pPatch->cbOp);
1776 AssertRC(rc);
1777
1778#ifdef LOG_ENABLED
1779 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
1780 szOutput, sizeof(szOutput), NULL);
1781 if (RT_SUCCESS(rc))
1782 Log(("Original instr: %s\n", szOutput));
1783#endif
1784 }
1785 pVM->hm.s.cPatches = 0;
1786 pVM->hm.s.PatchTree = 0;
1787 pVM->hm.s.pFreeGuestPatchMem = pVM->hm.s.pGuestPatchMem;
1788 pVM->hm.s.fTPRPatchingActive = false;
1789 return VINF_SUCCESS;
1790}
1791
1792
1793/**
1794 * Worker for enabling patching in a VT-x/AMD-V guest.
1795 *
1796 * @returns VBox status code.
1797 * @param pVM Pointer to the VM.
1798 * @param idCpu VCPU to execute hmR3RemovePatches on.
1799 * @param pPatchMem Patch memory range.
1800 * @param cbPatchMem Size of the memory range.
1801 */
1802static int hmR3EnablePatching(PVM pVM, VMCPUID idCpu, RTRCPTR pPatchMem, unsigned cbPatchMem)
1803{
1804 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hmR3RemovePatches, (void *)(uintptr_t)idCpu);
1805 AssertRC(rc);
1806
1807 pVM->hm.s.pGuestPatchMem = pPatchMem;
1808 pVM->hm.s.pFreeGuestPatchMem = pPatchMem;
1809 pVM->hm.s.cbGuestPatchMem = cbPatchMem;
1810 return VINF_SUCCESS;
1811}
1812
1813
1814/**
1815 * Enable patching in a VT-x/AMD-V guest.
1816 *
1817 * @returns VBox status code.
1818 * @param pVM Pointer to the VM.
1819 * @param pPatchMem Patch memory range.
1820 * @param cbPatchMem Size of the memory range.
1821 */
1822VMMR3DECL(int) HMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1823{
1824 VM_ASSERT_EMT(pVM);
1825 Log(("HMR3EnablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
1826 if (pVM->cCpus > 1)
1827 {
1828 /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
1829 int rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE,
1830 (PFNRT)hmR3EnablePatching, 4, pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
1831 AssertRC(rc);
1832 return rc;
1833 }
1834 return hmR3EnablePatching(pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
1835}
1836
1837
1838/**
1839 * Disable patching in a VT-x/AMD-V guest.
1840 *
1841 * @returns VBox status code.
1842 * @param pVM Pointer to the VM.
1843 * @param pPatchMem Patch memory range.
1844 * @param cbPatchMem Size of the memory range.
1845 */
1846VMMR3DECL(int) HMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1847{
1848 Log(("HMR3DisablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
1849
1850 Assert(pVM->hm.s.pGuestPatchMem == pPatchMem);
1851 Assert(pVM->hm.s.cbGuestPatchMem == cbPatchMem);
1852
1853 /** @todo Potential deadlock when other VCPUs are waiting on the IOM lock (we own it)! */
1854 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hmR3RemovePatches,
1855 (void *)(uintptr_t)VMMGetCpuId(pVM));
1856 AssertRC(rc);
1857
1858 pVM->hm.s.pGuestPatchMem = 0;
1859 pVM->hm.s.pFreeGuestPatchMem = 0;
1860 pVM->hm.s.cbGuestPatchMem = 0;
1861 pVM->hm.s.fTPRPatchingActive = false;
1862 return VINF_SUCCESS;
1863}
1864
1865
1866/**
1867 * Callback to patch a TPR instruction (vmmcall or mov cr8).
1868 *
1869 * @returns VBox strict status code.
1870 * @param pVM Pointer to the VM.
1871 * @param pVCpu The VMCPU for the EMT we're being called on.
1872 * @param pvUser The VCPU id (as a uintptr_t) the patch request was issued on.
1873 *
1874 */
1875DECLCALLBACK(VBOXSTRICTRC) hmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
1876{
1877 /*
1878 * Only execute the handler on the VCPU the original patch request was
1879 * issued on. (The other CPU(s) might not yet have switched to protected
1880 * mode, nor have the correct memory context.)
1881 */
1882 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
1883 if (pVCpu->idCpu != idCpu)
1884 return VINF_SUCCESS;
1885
1886 /*
1887 * We're racing other VCPUs here, so don't try to patch the instruction twice
1888 * and make sure there is still room for our patch record.
1889 */
1890 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1891 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
1892 if (pPatch)
1893 {
1894 Log(("hmR3ReplaceTprInstr: already patched %RGv\n", pCtx->rip));
1895 return VINF_SUCCESS;
1896 }
1897 uint32_t const idx = pVM->hm.s.cPatches;
1898 if (idx >= RT_ELEMENTS(pVM->hm.s.aPatches))
1899 {
1900 Log(("hmR3ReplaceTprInstr: no available patch slots (%RGv)\n", pCtx->rip));
1901 return VINF_SUCCESS;
1902 }
1903 pPatch = &pVM->hm.s.aPatches[idx];
1904
1905 Log(("hmR3ReplaceTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));
1906
1907 /*
1908 * Disassemble the instruction and get cracking.
1909 */
1910 DBGFR3DisasInstrCurrentLog(pVCpu, "hmR3ReplaceTprInstr");
1911 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
1912 uint32_t cbOp;
1913 int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
1914 AssertRC(rc);
1915 if ( rc == VINF_SUCCESS
1916 && pDis->pCurInstr->uOpcode == OP_MOV
1917 && cbOp >= 3)
1918 {
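 /* 0F 01 D9 = VMMCALL; the patched guest code traps to the VMM instead of touching the TPR directly. */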
1919 static uint8_t const s_abVMMCall[3] = { 0x0f, 0x01, 0xd9 };
1920
1921 rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
1922 AssertRC(rc);
1923
1924 pPatch->cbOp = cbOp;
1925
1926 if (pDis->Param1.fUse == DISUSE_DISPLACEMENT32)
1927 {
1928 /* write. */
1929 if (pDis->Param2.fUse == DISUSE_REG_GEN32)
1930 {
1931 pPatch->enmType = HMTPRINSTR_WRITE_REG;
1932 pPatch->uSrcOperand = pDis->Param2.Base.idxGenReg;
1933 Log(("hmR3ReplaceTprInstr: HMTPRINSTR_WRITE_REG %u\n", pDis->Param2.Base.idxGenReg));
1934 }
1935 else
1936 {
1937 Assert(pDis->Param2.fUse == DISUSE_IMMEDIATE32);
1938 pPatch->enmType = HMTPRINSTR_WRITE_IMM;
1939 pPatch->uSrcOperand = pDis->Param2.uValue;
1940 Log(("hmR3ReplaceTprInstr: HMTPRINSTR_WRITE_IMM %#llx\n", pDis->Param2.uValue));
1941 }
1942 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, s_abVMMCall, sizeof(s_abVMMCall));
1943 AssertRC(rc);
1944
1945 memcpy(pPatch->aNewOpcode, s_abVMMCall, sizeof(s_abVMMCall));
1946 pPatch->cbNewOp = sizeof(s_abVMMCall);
1947 }
1948 else
1949 {
1950 /*
1951 * TPR Read.
1952 *
1953 * Found:
1954 * mov eax, dword [fffe0080] (5 bytes)
1955 * Check if next instruction is:
1956 * shr eax, 4
1957 */
1958 Assert(pDis->Param1.fUse == DISUSE_REG_GEN32);
1959
1960 uint8_t const idxMmioReg = pDis->Param1.Base.idxGenReg;
1961 uint8_t const cbOpMmio = cbOp;
1962 uint64_t const uSavedRip = pCtx->rip;
1963
1964 pCtx->rip += cbOp;
1965 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
1966 DBGFR3DisasInstrCurrentLog(pVCpu, "Following read");
1967 pCtx->rip = uSavedRip;
1968
1969 if ( rc == VINF_SUCCESS
1970 && pDis->pCurInstr->uOpcode == OP_SHR
1971 && pDis->Param1.fUse == DISUSE_REG_GEN32
1972 && pDis->Param1.Base.idxGenReg == idxMmioReg
1973 && pDis->Param2.fUse == DISUSE_IMMEDIATE8
1974 && pDis->Param2.uValue == 4
1975 && cbOpMmio + cbOp < sizeof(pVM->hm.s.aPatches[idx].aOpcode))
1976 {
1977 uint8_t abInstr[15];
1978
1979 /* Replacing two instructions now. */
1980 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pPatch->aOpcode, pCtx->rip, cbOpMmio + cbOp);
1981 AssertRC(rc);
1982
1983 pPatch->cbOp = cbOpMmio + cbOp;
1984
1985 /* 0xF0, 0x0F, 0x20, 0xC0 = mov eax, cr8 */
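 /* On AMD CPUs the LOCK prefix (F0) on "mov reg, cr0" selects CR8 instead, giving 32-bit code access to the TPR. */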
1986 abInstr[0] = 0xF0;
1987 abInstr[1] = 0x0F;
1988 abInstr[2] = 0x20;
1989 abInstr[3] = 0xC0 | pDis->Param1.Base.idxGenReg;
1990 for (unsigned i = 4; i < pPatch->cbOp; i++)
1991 abInstr[i] = 0x90; /* nop */
1992
1993 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, abInstr, pPatch->cbOp);
1994 AssertRC(rc);
1995
1996 memcpy(pPatch->aNewOpcode, abInstr, pPatch->cbOp);
1997 pPatch->cbNewOp = pPatch->cbOp;
1998
1999 Log(("Acceptable read/shr candidate!\n"));
2000 pPatch->enmType = HMTPRINSTR_READ_SHR4;
2001 }
2002 else
2003 {
2004 pPatch->enmType = HMTPRINSTR_READ;
2005 pPatch->uDstOperand = idxMmioReg;
2006
2007 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, s_abVMMCall, sizeof(s_abVMMCall));
2008 AssertRC(rc);
2009
2010 memcpy(pPatch->aNewOpcode, s_abVMMCall, sizeof(s_abVMMCall));
2011 pPatch->cbNewOp = sizeof(s_abVMMCall);
2012 Log(("hmR3ReplaceTprInstr: HMTPRINSTR_READ %u\n", pPatch->uDstOperand));
2013 }
2014 }
2015
2016 pPatch->Core.Key = pCtx->eip;
2017 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2018 AssertRC(rc);
2019
2020 pVM->hm.s.cPatches++;
2021 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceSuccess);
2022 return VINF_SUCCESS;
2023 }
2024
2025 /*
2026 * Save invalid patch, so we will not try again.
2027 */
2028 Log(("hmR3ReplaceTprInstr: Failed to patch instr!\n"));
2029 pPatch->Core.Key = pCtx->eip;
2030 pPatch->enmType = HMTPRINSTR_INVALID;
2031 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2032 AssertRC(rc);
2033 pVM->hm.s.cPatches++;
2034 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceFailure);
2035 return VINF_SUCCESS;
2036}
2037
2038
2039/**
2040 * Callback to patch a TPR instruction (jump to generated code).
2041 *
2042 * @returns VBox strict status code.
2043 * @param pVM Pointer to the VM.
2044 * @param pVCpu The VMCPU for the EMT we're being called on.
2045 * @param pvUser The VCPU id (as a uintptr_t) the patch request was issued on.
2046 *
2047 */
2048DECLCALLBACK(VBOXSTRICTRC) hmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
2049{
2050 /*
2051 * Only execute the handler on the VCPU the original patch request was
2052 * issued on. (The other CPU(s) might not yet have switched to protected
2053 * mode, nor have the correct memory context.)
2054 */
2055 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
2056 if (pVCpu->idCpu != idCpu)
2057 return VINF_SUCCESS;
2058
2059 /*
2060 * We're racing other VCPUs here, so don't try to patch the instruction twice
2061 * and make sure there is still room for our patch record.
2062 */
2063 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
2064 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
2065 if (pPatch)
2066 {
2067 Log(("hmR3PatchTprInstr: already patched %RGv\n", pCtx->rip));
2068 return VINF_SUCCESS;
2069 }
2070 uint32_t const idx = pVM->hm.s.cPatches;
2071 if (idx >= RT_ELEMENTS(pVM->hm.s.aPatches))
2072 {
2073 Log(("hmR3PatchTprInstr: no available patch slots (%RGv)\n", pCtx->rip));
2074 return VINF_SUCCESS;
2075 }
2076 pPatch = &pVM->hm.s.aPatches[idx];
2077
2078 Log(("hmR3PatchTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));
2079 DBGFR3DisasInstrCurrentLog(pVCpu, "hmR3PatchTprInstr");
2080
2081 /*
2082 * Disassemble the instruction and get cracking.
2083 */
2084 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
2085 uint32_t cbOp;
2086 int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
2087 AssertRC(rc);
2088 if ( rc == VINF_SUCCESS
2089 && pDis->pCurInstr->uOpcode == OP_MOV
2090 && cbOp >= 5)
2091 {
2092 uint8_t aPatch[64];
2093 uint32_t off = 0;
2094
2095 rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
2096 AssertRC(rc);
2097
2098 pPatch->cbOp = cbOp;
2099 pPatch->enmType = HMTPRINSTR_JUMP_REPLACEMENT;
2100
2101 if (pDis->Param1.fUse == DISUSE_DISPLACEMENT32)
2102 {
2103 /*
2104 * TPR write:
2105 *
2106 * push ECX [51]
2107 * push EDX [52]
2108 * push EAX [50]
2109 * xor EDX,EDX [31 D2]
2110 * mov EAX,EAX [89 C0]
2111 * or
2112 * mov EAX,0000000CCh [B8 CC 00 00 00]
2113 * mov ECX,0C0000082h [B9 82 00 00 C0]
2114 * wrmsr [0F 30]
2115 * pop EAX [58]
2116 * pop EDX [5A]
2117 * pop ECX [59]
2118 * jmp return_address [E9 return_address]
2119 *
2120 */
2121 bool fUsesEax = (pDis->Param2.fUse == DISUSE_REG_GEN32 && pDis->Param2.Base.idxGenReg == DISGREG_EAX);
2122
2123 aPatch[off++] = 0x51; /* push ecx */
2124 aPatch[off++] = 0x52; /* push edx */
2125 if (!fUsesEax)
2126 aPatch[off++] = 0x50; /* push eax */
2127 aPatch[off++] = 0x31; /* xor edx, edx */
2128 aPatch[off++] = 0xD2;
2129 if (pDis->Param2.fUse == DISUSE_REG_GEN32)
2130 {
2131 if (!fUsesEax)
2132 {
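 /* 89 /r = MOV r/m32, r32: with ModRM mod=11b the reg field is the source and r/m the destination register. */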
2133 aPatch[off++] = 0x89; /* mov eax, src_reg */
2134 aPatch[off++] = MAKE_MODRM(3, pDis->Param2.Base.idxGenReg, DISGREG_EAX);
2135 }
2136 }
2137 else
2138 {
2139 Assert(pDis->Param2.fUse == DISUSE_IMMEDIATE32);
2140 aPatch[off++] = 0xB8; /* mov eax, immediate */
2141 *(uint32_t *)&aPatch[off] = pDis->Param2.uValue;
2142 off += sizeof(uint32_t);
2143 }
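 /* MSR_K8_LSTAR is unused by 32-bit guests; ring-0 intercepts accesses to it and forwards the value to the virtual APIC TPR. */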
2144 aPatch[off++] = 0xB9; /* mov ecx, 0xc0000082 */
2145 *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
2146 off += sizeof(uint32_t);
2147
2148 aPatch[off++] = 0x0F; /* wrmsr */
2149 aPatch[off++] = 0x30;
2150 if (!fUsesEax)
2151 aPatch[off++] = 0x58; /* pop eax */
2152 aPatch[off++] = 0x5A; /* pop edx */
2153 aPatch[off++] = 0x59; /* pop ecx */
2154 }
2155 else
2156 {
2157 /*
2158 * TPR read:
2159 *
2160 * push ECX [51]
2161 * push EDX [52]
2162 * push EAX [50]
2163 * mov ECX,0C0000082h [B9 82 00 00 C0]
2164 * rdmsr [0F 32]
2165 * mov EAX,EAX [89 C0]
2166 * pop EAX [58]
2167 * pop EDX [5A]
2168 * pop ECX [59]
2169 * jmp return_address [E9 return_address]
2170 *
2171 */
2172 Assert(pDis->Param1.fUse == DISUSE_REG_GEN32);
2173
2174 if (pDis->Param1.Base.idxGenReg != DISGREG_ECX)
2175 aPatch[off++] = 0x51; /* push ecx */
2176 if (pDis->Param1.Base.idxGenReg != DISGREG_EDX)
2177 aPatch[off++] = 0x52; /* push edx */
2178 if (pDis->Param1.Base.idxGenReg != DISGREG_EAX)
2179 aPatch[off++] = 0x50; /* push eax */
2180
2181 aPatch[off++] = 0x31; /* xor edx, edx */
2182 aPatch[off++] = 0xD2;
2183
2184 aPatch[off++] = 0xB9; /* mov ecx, 0xc0000082 */
2185 *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
2186 off += sizeof(uint32_t);
2187
2188 aPatch[off++] = 0x0F; /* rdmsr */
2189 aPatch[off++] = 0x32;
2190
2191 if (pDis->Param1.Base.idxGenReg != DISGREG_EAX)
2192 {
2193 aPatch[off++] = 0x89; /* mov dst_reg, eax */
2194 aPatch[off++] = MAKE_MODRM(3, DISGREG_EAX, pDis->Param1.Base.idxGenReg);
2195 }
2196
2197 if (pDis->Param1.Base.idxGenReg != DISGREG_EAX)
2198 aPatch[off++] = 0x58; /* pop eax */
2199 if (pDis->Param1.Base.idxGenReg != DISGREG_EDX)
2200 aPatch[off++] = 0x5A; /* pop edx */
2201 if (pDis->Param1.Base.idxGenReg != DISGREG_ECX)
2202 aPatch[off++] = 0x59; /* pop ecx */
2203 }
2204 aPatch[off++] = 0xE9; /* jmp return_address */
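 /* E9 takes a signed 32-bit displacement relative to the first byte after the jump instruction. */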
2205 *(RTRCUINTPTR *)&aPatch[off] = ((RTRCUINTPTR)pCtx->eip + cbOp) - ((RTRCUINTPTR)pVM->hm.s.pFreeGuestPatchMem + off + 4);
2206 off += sizeof(RTRCUINTPTR);
2207
2208 if (pVM->hm.s.pFreeGuestPatchMem + off <= pVM->hm.s.pGuestPatchMem + pVM->hm.s.cbGuestPatchMem)
2209 {
2210 /* Write new code to the patch buffer. */
2211 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pVM->hm.s.pFreeGuestPatchMem, aPatch, off);
2212 AssertRC(rc);
2213
2214#ifdef LOG_ENABLED
2215 uint32_t cbCurInstr;
2216 for (RTGCPTR GCPtrInstr = pVM->hm.s.pFreeGuestPatchMem;
2217 GCPtrInstr < pVM->hm.s.pFreeGuestPatchMem + off;
2218 GCPtrInstr += RT_MAX(cbCurInstr, 1))
2219 {
2220 char szOutput[256];
2221 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs.Sel, GCPtrInstr, DBGF_DISAS_FLAGS_DEFAULT_MODE,
2222 szOutput, sizeof(szOutput), &cbCurInstr);
2223 if (RT_SUCCESS(rc))
2224 Log(("Patch instr %s\n", szOutput));
2225 else
2226 Log(("%RGv: rc=%Rrc\n", GCPtrInstr, rc));
2227 }
2228#endif
2229
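 /* Build the 5-byte jump (E9 rel32) that will replace the original TPR instruction and redirect execution into the patch buffer. */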
2230 pPatch->aNewOpcode[0] = 0xE9;
2231 *(RTRCUINTPTR *)&pPatch->aNewOpcode[1] = ((RTRCUINTPTR)pVM->hm.s.pFreeGuestPatchMem) - ((RTRCUINTPTR)pCtx->eip + 5);
2232
2233 /* Overwrite the TPR instruction with a jump. */
2234 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->eip, pPatch->aNewOpcode, 5);
2235 AssertRC(rc);
2236
2237 DBGFR3DisasInstrCurrentLog(pVCpu, "Jump");
2238
2239 pVM->hm.s.pFreeGuestPatchMem += off;
2240 pPatch->cbNewOp = 5;
2241
2242 pPatch->Core.Key = pCtx->eip;
2243 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2244 AssertRC(rc);
2245
2246 pVM->hm.s.cPatches++;
2247 pVM->hm.s.fTPRPatchingActive = true;
2248 STAM_COUNTER_INC(&pVM->hm.s.StatTprPatchSuccess);
2249 return VINF_SUCCESS;
2250 }
2251
2252 Log(("Ran out of space in our patch buffer!\n"));
2253 }
2254 else
2255 Log(("hmR3PatchTprInstr: Failed to patch instr!\n"));
2256
2257
2258 /*
2259 * Save invalid patch, so we will not try again.
2260 */
2261 pPatch = &pVM->hm.s.aPatches[idx];
2262 pPatch->Core.Key = pCtx->eip;
2263 pPatch->enmType = HMTPRINSTR_INVALID;
2264 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2265 AssertRC(rc);
2266 pVM->hm.s.cPatches++;
2267 STAM_COUNTER_INC(&pVM->hm.s.StatTprPatchFailure);
2268 return VINF_SUCCESS;
2269}
2270
2271
2272/**
2273 * Attempt to patch TPR MMIO instructions.
2274 *
2275 * @returns VBox status code.
2276 * @param pVM Pointer to the VM.
2277 * @param pVCpu Pointer to the VMCPU.
2278 * @param pCtx Pointer to the guest CPU context.
2279 */
2280VMMR3DECL(int) HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2281{
2282 NOREF(pCtx);
2283 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE,
2284 pVM->hm.s.pGuestPatchMem ? hmR3PatchTprInstr : hmR3ReplaceTprInstr,
2285 (void *)(uintptr_t)pVCpu->idCpu);
2286 AssertRC(rc);
2287 return rc;
2288}
2289
2290
2291/**
2292 * Force execution of the current IO code in the recompiler.
2293 *
2294 * @returns VBox status code.
2295 * @param pVM Pointer to the VM.
2296 * @param pCtx Partial VM execution context.
2297 */
2298VMMR3DECL(int) HMR3EmulateIoBlock(PVM pVM, PCPUMCTX pCtx)
2299{
2300 PVMCPU pVCpu = VMMGetCpu(pVM);
2301
2302 Assert(pVM->fHMEnabled);
2303 Log(("HMR3EmulateIoBlock\n"));
2304
2305 /* This is primarily intended to speed up Grub, so we don't care about paged protected mode. */
2306 if (HMCanEmulateIoBlockEx(pCtx))
2307 {
2308 Log(("HMR3EmulateIoBlock -> enabled\n"));
2309 pVCpu->hm.s.EmulateIoBlock.fEnabled = true;
2310 pVCpu->hm.s.EmulateIoBlock.GCPtrFunctionEip = pCtx->rip;
2311 pVCpu->hm.s.EmulateIoBlock.cr0 = pCtx->cr0;
2312 return VINF_EM_RESCHEDULE_REM;
2313 }
2314 return VINF_SUCCESS;
2315}
2316
2317
2318/**
2319 * Checks if we can currently use hardware accelerated raw mode.
2320 *
2321 * @returns true if we can currently use hardware acceleration, otherwise false.
2322 * @param pVM Pointer to the VM.
2323 * @param pCtx Partial VM execution context.
2324 */
2325VMMR3DECL(bool) HMR3CanExecuteGuest(PVM pVM, PCPUMCTX pCtx)
2326{
2327 PVMCPU pVCpu = VMMGetCpu(pVM);
2328
2329 Assert(pVM->fHMEnabled);
2330
2331 /* If we're still executing the IO code, then return false. */
2332 if ( RT_UNLIKELY(pVCpu->hm.s.EmulateIoBlock.fEnabled)
2333 && pCtx->rip < pVCpu->hm.s.EmulateIoBlock.GCPtrFunctionEip + 0x200
2334 && pCtx->rip > pVCpu->hm.s.EmulateIoBlock.GCPtrFunctionEip - 0x200
2335 && pCtx->cr0 == pVCpu->hm.s.EmulateIoBlock.cr0)
2336 return false;
2337
2338 pVCpu->hm.s.EmulateIoBlock.fEnabled = false;
2339
2340 /* AMD-V supports real & protected mode with or without paging. */
2341 if (pVM->hm.s.svm.fEnabled)
2342 {
2343 pVCpu->hm.s.fActive = true;
2344 return true;
2345 }
2346
2347 pVCpu->hm.s.fActive = false;
2348
2349 /* Note! The context supplied by REM is partial. If we add more checks here, be sure to verify that REM provides this info! */
2350 Assert( (pVM->hm.s.vmx.fUnrestrictedGuest && !pVM->hm.s.vmx.pRealModeTSS)
2351 || (!pVM->hm.s.vmx.fUnrestrictedGuest && pVM->hm.s.vmx.pRealModeTSS));
2352
2353 bool fSupportsRealMode = pVM->hm.s.vmx.fUnrestrictedGuest || PDMVMMDevHeapIsEnabled(pVM);
2354 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
2355 {
2356 /*
2357 * The VMM device heap is a requirement for emulating real mode or protected mode without paging when the unrestricted
2358 * guest execution feature is missing (VT-x only).
2359 */
2360 if (fSupportsRealMode)
2361 {
2362 if (CPUMIsGuestInRealModeEx(pCtx))
2363 {
2364 /* In V86 mode (VT-x or not), the CPU enforces real-mode compatible selector
2365 * bases and limits, i.e. limit must be 64K and base must be selector * 16.
2366 * If this is not true, we cannot execute real mode as V86 and have to fall
2367 * back to emulation.
2368 */
2369 if ( pCtx->cs.Sel != (pCtx->cs.u64Base >> 4)
2370 || pCtx->ds.Sel != (pCtx->ds.u64Base >> 4)
2371 || pCtx->es.Sel != (pCtx->es.u64Base >> 4)
2372 || pCtx->ss.Sel != (pCtx->ss.u64Base >> 4)
2373 || pCtx->fs.Sel != (pCtx->fs.u64Base >> 4)
2374 || pCtx->gs.Sel != (pCtx->gs.u64Base >> 4)
2375 || (pCtx->cs.u32Limit != 0xffff)
2376 || (pCtx->ds.u32Limit != 0xffff)
2377 || (pCtx->es.u32Limit != 0xffff)
2378 || (pCtx->ss.u32Limit != 0xffff)
2379 || (pCtx->fs.u32Limit != 0xffff)
2380 || (pCtx->gs.u32Limit != 0xffff))
2381 {
2382 return false;
2383 }
2384 }
2385 else
2386 {
2387 PGMMODE enmGuestMode = PGMGetGuestMode(pVCpu);
2388 /* Verify the requirements for executing code in protected
2389 mode. VT-x can't handle the CPU state right after a switch
2390 from real to protected mode. (all sorts of RPL & DPL assumptions) */
2391 if ( pVCpu->hm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
2392 && enmGuestMode >= PGMMODE_PROTECTED)
2393 {
2394 if ( (pCtx->cs.Sel & X86_SEL_RPL)
2395 || (pCtx->ds.Sel & X86_SEL_RPL)
2396 || (pCtx->es.Sel & X86_SEL_RPL)
2397 || (pCtx->fs.Sel & X86_SEL_RPL)
2398 || (pCtx->gs.Sel & X86_SEL_RPL)
2399 || (pCtx->ss.Sel & X86_SEL_RPL))
2400 {
2401 return false;
2402 }
2403 }
2404 /* VT-x also chokes on invalid TR or LDTR selectors (Minix). */
2405 if ( pCtx->gdtr.cbGdt
2406 && ( pCtx->tr.Sel > pCtx->gdtr.cbGdt
2407 || pCtx->ldtr.Sel > pCtx->gdtr.cbGdt))
2408 {
2409 return false;
2410 }
2411 }
2412 }
2413 else
2414 {
2415 if ( !CPUMIsGuestInLongModeEx(pCtx)
2416 && !pVM->hm.s.vmx.fUnrestrictedGuest)
2417 {
2418 /** @todo This should (probably) be set on every excursion to the REM,
2419 * however it's too risky right now. So, only apply it when we go
2420 * back to REM for real mode execution. (The XP hack below doesn't
2421 * work reliably without this.)
2422 * Update: Implemented in EM.cpp, see #ifdef EM_NOTIFY_HM. */
2423 pVM->aCpus[0].hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
2424
2425 if ( !pVM->hm.s.fNestedPaging /* requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap */
2426 || CPUMIsGuestInRealModeEx(pCtx)) /* requires a fake TSS for real mode - stored in the VMM device heap */
2427 return false;
2428
2429 /* Too early for VT-x; Solaris guests will fail with a guru meditation otherwise; same for XP. */
2430 if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr.Sel == 0)
2431 return false;
2432
2433 /* The guest is about to complete the switch to protected mode. Wait a bit longer. */
2434 /* Windows XP; switch to protected mode; all selectors are marked not present in the
2435 * hidden registers (possible recompiler bug; see load_seg_vm) */
2436 if (pCtx->cs.Attr.n.u1Present == 0)
2437 return false;
2438 if (pCtx->ss.Attr.n.u1Present == 0)
2439 return false;
2440
2441 /* Windows XP: possible same as above, but new recompiler requires new heuristics?
2442 VT-x doesn't seem to like something about the guest state and this stuff avoids it. */
2443 /** @todo This check is actually wrong, it doesn't take the direction of the
2444 * stack segment into account. But, it does the job for now. */
2445 if (pCtx->rsp >= pCtx->ss.u32Limit)
2446 return false;
2447#if 0
2448 if ( pCtx->cs.Sel >= pCtx->gdtr.cbGdt
2449 || pCtx->ss.Sel >= pCtx->gdtr.cbGdt
2450 || pCtx->ds.Sel >= pCtx->gdtr.cbGdt
2451 || pCtx->es.Sel >= pCtx->gdtr.cbGdt
2452 || pCtx->fs.Sel >= pCtx->gdtr.cbGdt
2453 || pCtx->gs.Sel >= pCtx->gdtr.cbGdt)
2454 return false;
2455#endif
2456 }
2457 }
2458 }
2459
2460 if (pVM->hm.s.vmx.fEnabled)
2461 {
2462 uint32_t mask;
2463
2464 /* if bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
2465 mask = (uint32_t)pVM->hm.s.vmx.msr.vmx_cr0_fixed0;
2466 /* Note: We ignore the NE bit here on purpose; see vmmr0\hmr0.cpp for details. */
2467 mask &= ~X86_CR0_NE;
2468
2469 if (fSupportsRealMode)
2470 {
2471 /* Note: We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
2472 mask &= ~(X86_CR0_PG|X86_CR0_PE);
2473 }
2474 else
2475 {
2476 /* We support protected mode without paging using identity mapping. */
2477 mask &= ~X86_CR0_PG;
2478 }
2479 if ((pCtx->cr0 & mask) != mask)
2480 return false;
2481
2482 /* if bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
2483 mask = (uint32_t)~pVM->hm.s.vmx.msr.vmx_cr0_fixed1;
2484 if ((pCtx->cr0 & mask) != 0)
2485 return false;
2486
2487 /* if bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
2488 mask = (uint32_t)pVM->hm.s.vmx.msr.vmx_cr4_fixed0;
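 /* VMXE must be set on the host while in VMX operation, but the guest's CR4 is shadowed, so exclude it from the check. */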
2489 mask &= ~X86_CR4_VMXE;
2490 if ((pCtx->cr4 & mask) != mask)
2491 return false;
2492
2493 /* if bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
2494 mask = (uint32_t)~pVM->hm.s.vmx.msr.vmx_cr4_fixed1;
2495 if ((pCtx->cr4 & mask) != 0)
2496 return false;
2497
2498 pVCpu->hm.s.fActive = true;
2499 return true;
2500 }
2501
2502 return false;
2503}
2504
2505
2506/**
2507 * Checks if we need to reschedule due to VMM device heap changes.
2508 *
2509 * @returns true if a reschedule is required, otherwise false.
2510 * @param pVM Pointer to the VM.
2511 * @param pCtx VM execution context.
2512 */
2513VMMR3DECL(bool) HMR3IsRescheduleRequired(PVM pVM, PCPUMCTX pCtx)
2514{
2515 /*
2516 * The VMM device heap is a requirement for emulating real mode or protected mode without paging
2517 * when the unrestricted guest execution feature is missing (VT-x only).
2518 */
2519 if ( pVM->hm.s.vmx.fEnabled
2520 && !pVM->hm.s.vmx.fUnrestrictedGuest
2521 && !CPUMIsGuestInPagedProtectedModeEx(pCtx)
2522 && !PDMVMMDevHeapIsEnabled(pVM)
2523 && (pVM->hm.s.fNestedPaging || CPUMIsGuestInRealModeEx(pCtx)))
2524 return true;
2525
2526 return false;
2527}
2528
2529
2530/**
2531 * Notification from EM about a rescheduling into hardware assisted execution
2532 * mode.
2533 *
2534 * @param pVCpu Pointer to the current VMCPU.
2535 */
2536VMMR3DECL(void) HMR3NotifyScheduled(PVMCPU pVCpu)
2537{
2538 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
2539}
2540
2541
2542/**
2543 * Notification from EM about returning from instruction emulation (REM / EM).
2544 *
2545 * @param pVCpu Pointer to the VMCPU.
2546 */
2547VMMR3DECL(void) HMR3NotifyEmulated(PVMCPU pVCpu)
2548{
2549 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
2550}
2551
2552
2553/**
2554 * Checks if we are currently using hardware accelerated raw mode.
2555 *
2556 * @returns true if hardware acceleration is being used, otherwise false.
2557 * @param pVCpu Pointer to the VMCPU.
2558 */
2559VMMR3DECL(bool) HMR3IsActive(PVMCPU pVCpu)
2560{
2561 return pVCpu->hm.s.fActive;
2562}
2563
2564
2565/**
2566 * Checks if we are currently using nested paging.
2567 *
2568 * @returns true if nested paging is being used, otherwise false.
2569 * @param pVM Pointer to the VM.
2570 */
2571VMMR3DECL(bool) HMR3IsNestedPagingActive(PVM pVM)
2572{
2573 return pVM->hm.s.fNestedPaging;
2574}
2575
2576
2577/**
2578 * Checks if we are currently using VPID in VT-x mode.
2579 *
2580 * @returns true if VPID is being used, otherwise false.
2581 * @param pVM Pointer to the VM.
2582 */
2583VMMR3DECL(bool) HMR3IsVPIDActive(PVM pVM)
2584{
2585 return pVM->hm.s.vmx.fVpid;
2586}
2587
2588
2589/**
2590 * Checks if internal events are pending. In that case we are not allowed to dispatch interrupts.
2591 *
2592 * @returns true if an internal event is pending, otherwise false.
2593 * @param pVCpu Pointer to the VMCPU.
2594 */
2595VMMR3DECL(bool) HMR3IsEventPending(PVMCPU pVCpu)
2596{
2597 return HMIsEnabled(pVCpu->pVMR3) && pVCpu->hm.s.Event.fPending;
2598}
2599
2600
2601/**
2602 * Checks if the VMX-preemption timer is being used.
2603 *
2604 * @returns true if the VMX-preemption timer is being used, otherwise false.
2605 * @param pVM Pointer to the VM.
2606 */
2607VMMR3DECL(bool) HMR3IsVmxPreemptionTimerUsed(PVM pVM)
2608{
2609 return HMIsEnabled(pVM)
2610 && pVM->hm.s.vmx.fEnabled
2611 && pVM->hm.s.vmx.fUsePreemptTimer;
2612}
2613
2614
2615/**
2616 * Restart an I/O instruction that was refused in ring-0.
2617 *
2618 * @returns Strict VBox status code. Informational status codes other than the one documented
2619 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2620 * @retval VINF_SUCCESS Success.
2621 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2622 * status code must be passed on to EM.
2623 * @retval VERR_NOT_FOUND if no pending I/O instruction.
2624 *
2625 * @param pVM Pointer to the VM.
2626 * @param pVCpu Pointer to the VMCPU.
2627 * @param pCtx Pointer to the guest CPU context.
2628 */
2629VMMR3DECL(VBOXSTRICTRC) HMR3RestartPendingIOInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2630{
2631 HMPENDINGIO enmType = pVCpu->hm.s.PendingIO.enmType;
2632
2633 pVCpu->hm.s.PendingIO.enmType = HMPENDINGIO_INVALID;
2634
2635 if ( pVCpu->hm.s.PendingIO.GCPtrRip != pCtx->rip
2636 || enmType == HMPENDINGIO_INVALID)
2637 return VERR_NOT_FOUND;
2638
2639 VBOXSTRICTRC rcStrict;
2640 switch (enmType)
2641 {
2642 case HMPENDINGIO_PORT_READ:
2643 {
2644 uint32_t uAndVal = pVCpu->hm.s.PendingIO.s.Port.uAndVal;
2645 uint32_t u32Val = 0;
2646
2647 rcStrict = IOMIOPortRead(pVM, pVCpu->hm.s.PendingIO.s.Port.uPort,
2648 &u32Val,
2649 pVCpu->hm.s.PendingIO.s.Port.cbSize);
2650 if (IOM_SUCCESS(rcStrict))
2651 {
2652 /* Write back to the EAX register. */
2653 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
2654 pCtx->rip = pVCpu->hm.s.PendingIO.GCPtrRipNext;
2655 }
2656 break;
2657 }
2658
2659 case HMPENDINGIO_PORT_WRITE:
2660 rcStrict = IOMIOPortWrite(pVM, pVCpu->hm.s.PendingIO.s.Port.uPort,
2661 pCtx->eax & pVCpu->hm.s.PendingIO.s.Port.uAndVal,
2662 pVCpu->hm.s.PendingIO.s.Port.cbSize);
2663 if (IOM_SUCCESS(rcStrict))
2664 pCtx->rip = pVCpu->hm.s.PendingIO.GCPtrRipNext;
2665 break;
2666
2667 default:
2668 AssertLogRelFailedReturn(VERR_HM_UNKNOWN_IO_INSTRUCTION);
2669 }
2670
2671 return rcStrict;
2672}
2673
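/**
 * Example (editor's sketch, not part of the original source): how an
 * EM-style caller might retry a refused I/O instruction once back in
 * ring-3. The surrounding control flow is an assumption; only
 * HMR3RestartPendingIOInstr() and IOM_SUCCESS() are taken from this file.
 * Informational success codes would still have to be handed on to EM, per
 * the doc comment above.
 *
 * @code
 *     VBOXSTRICTRC rcStrict = HMR3RestartPendingIOInstr(pVM, pVCpu, CPUMQueryGuestCtxPtr(pVCpu));
 *     if (rcStrict != VERR_NOT_FOUND && !IOM_SUCCESS(rcStrict))
 *         return VBOXSTRICTRC_TODO(rcStrict);
 * @endcode
 */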
2674
2675/**
2676 * Injects an NMI into a running VM (on VCPU 0 only).
2677 *
2678 * @returns VBox status code.
2679 * @param pVM Pointer to the VM.
2680 */
2681VMMR3DECL(int) HMR3InjectNMI(PVM pVM)
2682{
2683 VMCPU_FF_SET(&pVM->aCpus[0], VMCPU_FF_INTERRUPT_NMI);
2684 return VINF_SUCCESS;
2685}
2686
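/**
 * Example (editor's sketch, not part of the original source): requesting an
 * NMI, e.g. from a debugger command handler. The handler context is
 * hypothetical; HMR3InjectNMI() is defined above.
 *
 * @code
 *     int rc = HMR3InjectNMI(pVM);
 *     AssertRC(rc);
 * @endcode
 */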
2687
2688/**
2689 * Checks a fatal VT-x/AMD-V error and produces a meaningful
2690 * release log message.
2691 *
2692 * @param pVM Pointer to the VM.
2693 * @param iStatusCode VBox status code.
2694 */
2695VMMR3DECL(void) HMR3CheckError(PVM pVM, int iStatusCode)
2696{
2697 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2698 {
2699 switch (iStatusCode)
2700 {
2701 case VERR_VMX_INVALID_VMCS_FIELD:
2702 break;
2703
2704 case VERR_VMX_INVALID_VMCS_PTR:
2705 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current pointer %RGp vs %RGp\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.u64VMCSPhys, pVM->aCpus[i].hm.s.vmx.HCPhysVMCS));
2706 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current VMCS version %x\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.ulVMCSRevision));
2707 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Entered Cpu %d\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.idEnteredCpu));
2708 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current Cpu %d\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.idCurrentCpu));
2709 break;
2710
2711 case VERR_VMX_UNABLE_TO_START_VM:
2712 LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.ulInstrError));
2713 LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.ulExitReason));
2714 if (pVM->aCpus[i].hm.s.vmx.lasterror.ulInstrError == VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS)
2715 {
2716 LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d MSRBitmapPhys %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMsrBitmap));
2717#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
2718 LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d GuestMSRPhys %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysGuestMsr));
2719 LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d HostMsrPhys %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysHostMsr));
2720 LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d cGuestMSRs %x\n", i, pVM->aCpus[i].hm.s.vmx.cGuestMsrs));
2721#endif
2722 }
2723 /** @todo Log VM-entry event injection control fields
2724 * VMX_VMCS_CTRL_ENTRY_IRQ_INFO, VMX_VMCS_CTRL_ENTRY_EXCEPTION_ERRCODE
2725 * and VMX_VMCS_CTRL_ENTRY_INSTR_LENGTH from the VMCS. */
2726 break;
2727
2728 case VERR_VMX_UNABLE_TO_RESUME_VM:
2729 LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.ulInstrError));
2730 LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.ulExitReason));
2731 break;
2732
2733 case VERR_VMX_INVALID_VMXON_PTR:
2734 break;
2735 }
2736 }
2737
2738 if (iStatusCode == VERR_VMX_UNABLE_TO_START_VM)
2739 {
2740 LogRel(("VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed %x\n", pVM->hm.s.vmx.msr.vmx_entry.n.allowed1));
2741 LogRel(("VERR_VMX_UNABLE_TO_START_VM: VM-entry disallowed %x\n", pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0));
2742 }
2743}
2744
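/**
 * Example (editor's sketch, not part of the original source): producing a
 * release-log dump for a fatal VT-x/AMD-V status code before failing the
 * VM. hmR3SomeOperation() is a hypothetical placeholder; HMR3CheckError()
 * is defined above and VMSetError() is the standard VMM error API.
 *
 * @code
 *     int rc = hmR3SomeOperation(pVM);
 *     if (RT_FAILURE(rc))
 *     {
 *         HMR3CheckError(pVM, rc);
 *         return VMSetError(pVM, rc, RT_SRC_POS, "Fatal hardware virtualization error");
 *     }
 * @endcode
 */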
2745
2746/**
2747 * Execute state save operation.
2748 *
2749 * @returns VBox status code.
2750 * @param pVM Pointer to the VM.
2751 * @param pSSM SSM operation handle.
2752 */
2753static DECLCALLBACK(int) hmR3Save(PVM pVM, PSSMHANDLE pSSM)
2754{
2755 int rc;
2756
2757 Log(("hmR3Save:\n"));
2758
2759 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2760 {
2761 /*
2762 * Save the basic bits - fortunately all the other things can be resynced on load.
2763 */
2764 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.Event.fPending);
2765 AssertRCReturn(rc, rc);
2766 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.Event.errCode);
2767 AssertRCReturn(rc, rc);
2768 rc = SSMR3PutU64(pSSM, pVM->aCpus[i].hm.s.Event.intInfo);
2769 AssertRCReturn(rc, rc);
2770
2771 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.vmx.enmLastSeenGuestMode);
2772 AssertRCReturn(rc, rc);
2773 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.vmx.enmCurrGuestMode);
2774 AssertRCReturn(rc, rc);
2775 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.vmx.enmPrevGuestMode);
2776 AssertRCReturn(rc, rc);
2777 }
2778#ifdef VBOX_HM_WITH_GUEST_PATCHING
2779 rc = SSMR3PutGCPtr(pSSM, pVM->hm.s.pGuestPatchMem);
2780 AssertRCReturn(rc, rc);
2781 rc = SSMR3PutGCPtr(pSSM, pVM->hm.s.pFreeGuestPatchMem);
2782 AssertRCReturn(rc, rc);
2783 rc = SSMR3PutU32(pSSM, pVM->hm.s.cbGuestPatchMem);
2784 AssertRCReturn(rc, rc);
2785
2786 /* Store all the guest patch records too. */
2787 rc = SSMR3PutU32(pSSM, pVM->hm.s.cPatches);
2788 AssertRCReturn(rc, rc);
2789
2790 for (unsigned i = 0; i < pVM->hm.s.cPatches; i++)
2791 {
2792 PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
2793
2794 rc = SSMR3PutU32(pSSM, pPatch->Core.Key);
2795 AssertRCReturn(rc, rc);
2796
2797 rc = SSMR3PutMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
2798 AssertRCReturn(rc, rc);
2799
2800 rc = SSMR3PutU32(pSSM, pPatch->cbOp);
2801 AssertRCReturn(rc, rc);
2802
2803 rc = SSMR3PutMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
2804 AssertRCReturn(rc, rc);
2805
2806 rc = SSMR3PutU32(pSSM, pPatch->cbNewOp);
2807 AssertRCReturn(rc, rc);
2808
2809 AssertCompileSize(HMTPRINSTR, 4);
2810 rc = SSMR3PutU32(pSSM, (uint32_t)pPatch->enmType);
2811 AssertRCReturn(rc, rc);
2812
2813 rc = SSMR3PutU32(pSSM, pPatch->uSrcOperand);
2814 AssertRCReturn(rc, rc);
2815
2816 rc = SSMR3PutU32(pSSM, pPatch->uDstOperand);
2817 AssertRCReturn(rc, rc);
2818
2819 rc = SSMR3PutU32(pSSM, pPatch->pJumpTarget);
2820 AssertRCReturn(rc, rc);
2821
2822 rc = SSMR3PutU32(pSSM, pPatch->cFaults);
2823 AssertRCReturn(rc, rc);
2824 }
2825#endif
2826 return VINF_SUCCESS;
2827}
2828
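/**
 * Example (editor's sketch, not part of the original source): how hmR3Save
 * and hmR3Load are typically wired up as SSM callbacks. HMR3Init performs a
 * registration along these lines earlier in this file; the exact arguments
 * shown here are an assumption.
 *
 * @code
 *     rc = SSMR3RegisterInternal(pVM, "HWACCM", 0, HM_SSM_VERSION, sizeof(HM),
 *                                NULL, NULL, NULL,
 *                                NULL, hmR3Save, NULL,
 *                                NULL, hmR3Load, NULL);
 *     AssertRCReturn(rc, rc);
 * @endcode
 */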
2829
2830/**
2831 * Execute state load operation.
2832 *
2833 * @returns VBox status code.
2834 * @param pVM Pointer to the VM.
2835 * @param pSSM SSM operation handle.
2836 * @param uVersion Data layout version.
2837 * @param uPass The data pass.
2838 */
2839static DECLCALLBACK(int) hmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
2840{
2841 int rc;
2842
2843 Log(("hmR3Load:\n"));
2844 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
2845
2846 /*
2847 * Validate version.
2848 */
2849 if ( uVersion != HM_SSM_VERSION
2850 && uVersion != HM_SSM_VERSION_NO_PATCHING
2851 && uVersion != HM_SSM_VERSION_2_0_X)
2852 {
2853 AssertMsgFailed(("hmR3Load: Invalid version uVersion=%d!\n", uVersion));
2854 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
2855 }
2856 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2857 {
2858 rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hm.s.Event.fPending);
2859 AssertRCReturn(rc, rc);
2860 rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hm.s.Event.errCode);
2861 AssertRCReturn(rc, rc);
2862 rc = SSMR3GetU64(pSSM, &pVM->aCpus[i].hm.s.Event.intInfo);
2863 AssertRCReturn(rc, rc);
2864
2865 if (uVersion >= HM_SSM_VERSION_NO_PATCHING)
2866 {
2867 uint32_t val;
2868
2869 rc = SSMR3GetU32(pSSM, &val);
2870 AssertRCReturn(rc, rc);
2871 pVM->aCpus[i].hm.s.vmx.enmLastSeenGuestMode = (PGMMODE)val;
2872
2873 rc = SSMR3GetU32(pSSM, &val);
2874 AssertRCReturn(rc, rc);
2875 pVM->aCpus[i].hm.s.vmx.enmCurrGuestMode = (PGMMODE)val;
2876
2877 rc = SSMR3GetU32(pSSM, &val);
2878 AssertRCReturn(rc, rc);
2879 pVM->aCpus[i].hm.s.vmx.enmPrevGuestMode = (PGMMODE)val;
2880 }
2881 }
2882#ifdef VBOX_HM_WITH_GUEST_PATCHING
2883 if (uVersion > HM_SSM_VERSION_NO_PATCHING)
2884 {
2885 rc = SSMR3GetGCPtr(pSSM, &pVM->hm.s.pGuestPatchMem);
2886 AssertRCReturn(rc, rc);
2887 rc = SSMR3GetGCPtr(pSSM, &pVM->hm.s.pFreeGuestPatchMem);
2888 AssertRCReturn(rc, rc);
2889 rc = SSMR3GetU32(pSSM, &pVM->hm.s.cbGuestPatchMem);
2890 AssertRCReturn(rc, rc);
2891
2892 /* Fetch all TPR patch records. */
2893 rc = SSMR3GetU32(pSSM, &pVM->hm.s.cPatches);
2894 AssertRCReturn(rc, rc);
2895
2896 for (unsigned i = 0; i < pVM->hm.s.cPatches; i++)
2897 {
2898 PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
2899
2900 rc = SSMR3GetU32(pSSM, &pPatch->Core.Key);
2901 AssertRCReturn(rc, rc);
2902
2903 rc = SSMR3GetMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
2904 AssertRCReturn(rc, rc);
2905
2906 rc = SSMR3GetU32(pSSM, &pPatch->cbOp);
2907 AssertRCReturn(rc, rc);
2908
2909 rc = SSMR3GetMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
2910 AssertRCReturn(rc, rc);
2911
2912 rc = SSMR3GetU32(pSSM, &pPatch->cbNewOp);
2913 AssertRCReturn(rc, rc);
2914
2915 rc = SSMR3GetU32(pSSM, (uint32_t *)&pPatch->enmType);
2916 AssertRCReturn(rc, rc);
2917
2918 if (pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT)
2919 pVM->hm.s.fTPRPatchingActive = true;
2920
2921 Assert(pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT || pVM->hm.s.fTPRPatchingActive == false);
2922
2923 rc = SSMR3GetU32(pSSM, &pPatch->uSrcOperand);
2924 AssertRCReturn(rc, rc);
2925
2926 rc = SSMR3GetU32(pSSM, &pPatch->uDstOperand);
2927 AssertRCReturn(rc, rc);
2928
2929 rc = SSMR3GetU32(pSSM, &pPatch->pJumpTarget); /* Read order must match the put order in hmR3Save: pJumpTarget before cFaults. */
2930 AssertRCReturn(rc, rc);
2931
2932 rc = SSMR3GetU32(pSSM, &pPatch->cFaults);
2933 AssertRCReturn(rc, rc);
2934
2935 Log(("hmR3Load: patch %d\n", i));
2936 Log(("Key = %x\n", pPatch->Core.Key));
2937 Log(("cbOp = %d\n", pPatch->cbOp));
2938 Log(("cbNewOp = %d\n", pPatch->cbNewOp));
2939 Log(("type = %d\n", pPatch->enmType));
2940 Log(("srcop = %d\n", pPatch->uSrcOperand));
2941 Log(("dstop = %d\n", pPatch->uDstOperand));
2942 Log(("cFaults = %d\n", pPatch->cFaults));
2943 Log(("target = %x\n", pPatch->pJumpTarget));
2944 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2945 AssertRC(rc);
2946 }
2947 }
2948#endif
2949
2950 /* Recheck all VCPUs to see if we can go straight into HM execution mode. */
2951 if (HMIsEnabled(pVM))
2952 {
2953 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2954 {
2955 PVMCPU pVCpu = &pVM->aCpus[i];
2956
2957 HMR3CanExecuteGuest(pVM, CPUMQueryGuestCtxPtr(pVCpu));
2958 }
2959 }
2960 return VINF_SUCCESS;
2961}
2962