VirtualBox

source: vbox/trunk/src/VBox/VMM/HWACCM.cpp@18768

Last change on this file since 18768 was 18284, checked in by vboxsync, 16 years ago

HWACCM: Respect VMMIsHwVirtExtForced.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 78.5 KB
/* $Id: HWACCM.cpp 18284 2009-03-26 03:28:41Z vboxsync $ */
/** @file
 * HWACCM - Intel/AMD VM Hardware Support Manager
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HWACCM
#include <VBox/cpum.h>
#include <VBox/stam.h>
#include <VBox/mm.h>
#include <VBox/pdm.h>
#include <VBox/pgm.h>
#include <VBox/trpm.h>
#include <VBox/dbgf.h>
#include <VBox/patm.h>
#include <VBox/csam.h>
#include <VBox/selm.h>
#include <VBox/rem.h>
#include <VBox/hwacc_vmx.h>
#include <VBox/hwacc_svm.h>
#include "HWACCMInternal.h"
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/param.h>

#include <iprt/assert.h>
#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/thread.h>

/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
#ifdef VBOX_WITH_STATISTICS
# define EXIT_REASON(def, val, str) #def " - " #val " - " str
# define EXIT_REASON_NIL() NULL
/** Exit reason descriptions for VT-x, used to describe statistics. */
static const char * const g_apszVTxExitReasons[MAX_EXITREASON_STAT] =
{
    EXIT_REASON(VMX_EXIT_EXCEPTION              ,  0, "Exception or non-maskable interrupt (NMI)."),
    EXIT_REASON(VMX_EXIT_EXTERNAL_IRQ           ,  1, "External interrupt."),
    EXIT_REASON(VMX_EXIT_TRIPLE_FAULT           ,  2, "Triple fault."),
    EXIT_REASON(VMX_EXIT_INIT_SIGNAL            ,  3, "INIT signal."),
    EXIT_REASON(VMX_EXIT_SIPI                   ,  4, "Start-up IPI (SIPI)."),
    EXIT_REASON(VMX_EXIT_IO_SMI_IRQ             ,  5, "I/O system-management interrupt (SMI)."),
    EXIT_REASON(VMX_EXIT_SMI_IRQ                ,  6, "Other SMI."),
    EXIT_REASON(VMX_EXIT_IRQ_WINDOW             ,  7, "Interrupt window."),
    EXIT_REASON_NIL(),
    EXIT_REASON(VMX_EXIT_TASK_SWITCH            ,  9, "Task switch."),
    EXIT_REASON(VMX_EXIT_CPUID                  , 10, "Guest software attempted to execute CPUID."),
    EXIT_REASON_NIL(),
    EXIT_REASON(VMX_EXIT_HLT                    , 12, "Guest software attempted to execute HLT."),
    EXIT_REASON(VMX_EXIT_INVD                   , 13, "Guest software attempted to execute INVD."),
    EXIT_REASON(VMX_EXIT_INVPG                  , 14, "Guest software attempted to execute INVLPG."),
    EXIT_REASON(VMX_EXIT_RDPMC                  , 15, "Guest software attempted to execute RDPMC."),
    EXIT_REASON(VMX_EXIT_RDTSC                  , 16, "Guest software attempted to execute RDTSC."),
    EXIT_REASON(VMX_EXIT_RSM                    , 17, "Guest software attempted to execute RSM in SMM."),
    EXIT_REASON(VMX_EXIT_VMCALL                 , 18, "Guest software executed VMCALL."),
    EXIT_REASON(VMX_EXIT_VMCLEAR                , 19, "Guest software executed VMCLEAR."),
    EXIT_REASON(VMX_EXIT_VMLAUNCH               , 20, "Guest software executed VMLAUNCH."),
    EXIT_REASON(VMX_EXIT_VMPTRLD                , 21, "Guest software executed VMPTRLD."),
    EXIT_REASON(VMX_EXIT_VMPTRST                , 22, "Guest software executed VMPTRST."),
    EXIT_REASON(VMX_EXIT_VMREAD                 , 23, "Guest software executed VMREAD."),
    EXIT_REASON(VMX_EXIT_VMRESUME               , 24, "Guest software executed VMRESUME."),
    EXIT_REASON(VMX_EXIT_VMWRITE                , 25, "Guest software executed VMWRITE."),
    EXIT_REASON(VMX_EXIT_VMXOFF                 , 26, "Guest software executed VMXOFF."),
    EXIT_REASON(VMX_EXIT_VMXON                  , 27, "Guest software executed VMXON."),
    EXIT_REASON(VMX_EXIT_CRX_MOVE               , 28, "Control-register accesses."),
    EXIT_REASON(VMX_EXIT_DRX_MOVE               , 29, "Debug-register accesses."),
    EXIT_REASON(VMX_EXIT_PORT_IO                , 30, "I/O instruction."),
    EXIT_REASON(VMX_EXIT_RDMSR                  , 31, "RDMSR. Guest software attempted to execute RDMSR."),
    EXIT_REASON(VMX_EXIT_WRMSR                  , 32, "WRMSR. Guest software attempted to execute WRMSR."),
    EXIT_REASON(VMX_EXIT_ERR_INVALID_GUEST_STATE, 33, "VM-entry failure due to invalid guest state."),
    EXIT_REASON(VMX_EXIT_ERR_MSR_LOAD           , 34, "VM-entry failure due to MSR loading."),
    EXIT_REASON_NIL(),
    EXIT_REASON(VMX_EXIT_MWAIT                  , 36, "Guest software executed MWAIT."),
    EXIT_REASON_NIL(),
    EXIT_REASON_NIL(),
    EXIT_REASON(VMX_EXIT_MONITOR                , 39, "Guest software attempted to execute MONITOR."),
    EXIT_REASON(VMX_EXIT_PAUSE                  , 40, "Guest software attempted to execute PAUSE."),
    EXIT_REASON(VMX_EXIT_ERR_MACHINE_CHECK      , 41, "VM-entry failure due to machine-check."),
    EXIT_REASON_NIL(),
    EXIT_REASON(VMX_EXIT_TPR                    , 43, "TPR below threshold. Guest software executed MOV to CR8."),
    EXIT_REASON(VMX_EXIT_APIC_ACCESS            , 44, "APIC access. Guest software attempted to access memory at a physical address on the APIC-access page."),
    EXIT_REASON_NIL(),
    EXIT_REASON(VMX_EXIT_XDTR_ACCESS            , 46, "Access to GDTR or IDTR. Guest software attempted to execute LGDT, LIDT, SGDT, or SIDT."),
    EXIT_REASON(VMX_EXIT_TR_ACCESS              , 47, "Access to LDTR or TR. Guest software attempted to execute LLDT, LTR, SLDT, or STR."),
    EXIT_REASON(VMX_EXIT_EPT_VIOLATION          , 48, "EPT violation. An attempt to access memory with a guest-physical address was disallowed by the configuration of the EPT paging structures."),
    EXIT_REASON(VMX_EXIT_EPT_MISCONFIG          , 49, "EPT misconfiguration. An attempt to access memory with a guest-physical address encountered a misconfigured EPT paging-structure entry."),
    EXIT_REASON(VMX_EXIT_INVEPT                 , 50, "INVEPT. Guest software attempted to execute INVEPT."),
    EXIT_REASON_NIL(),
    EXIT_REASON(VMX_EXIT_PREEMPTION_TIMER       , 52, "VMX-preemption timer expired. The preemption timer counted down to zero."),
    EXIT_REASON(VMX_EXIT_INVVPID                , 53, "INVVPID. Guest software attempted to execute INVVPID."),
    EXIT_REASON(VMX_EXIT_WBINVD                 , 54, "WBINVD. Guest software attempted to execute WBINVD."),
    EXIT_REASON(VMX_EXIT_XSETBV                 , 55, "XSETBV. Guest software attempted to execute XSETBV."),
    EXIT_REASON_NIL()
};
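/* Note: the table above is indexed directly by the VT-x exit reason code (see
 * the "/HWACCM/CPU%d/Exit/Reason/%02x" registrations in HWACCMR3InitCPU), so
 * the EXIT_REASON_NIL() slots correspond to reason codes that are undefined
 * or reserved. */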
/** Exit reason descriptions for AMD-V, used to describe statistics. */
static const char * const g_apszAmdVExitReasons[MAX_EXITREASON_STAT] =
{
    /** @todo fill in these. */
    EXIT_REASON_NIL()
};
# undef EXIT_REASON
# undef EXIT_REASON_NIL
#endif /* VBOX_WITH_STATISTICS */

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int) hwaccmR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) hwaccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);


/**
 * Initializes the HWACCM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) HWACCMR3Init(PVM pVM)
{
    LogFlow(("HWACCMR3Init\n"));

    /*
     * Assert alignment and sizes.
     */
    AssertRelease(!(RT_OFFSETOF(VM, hwaccm.s) & 31));
    AssertRelease(sizeof(pVM->hwaccm.s) <= sizeof(pVM->hwaccm.padding));

    /* Some structure checks. */
    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, u8Reserved3) == 0xC0, ("u8Reserved3 offset = %x\n", RT_OFFSETOF(SVM_VMCB, u8Reserved3)));
    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.EventInject) == 0xA8, ("ctrl.EventInject offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.EventInject)));
    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.ExitIntInfo) == 0x88, ("ctrl.ExitIntInfo offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.ExitIntInfo)));
    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.TLBCtrl) == 0x58, ("ctrl.TLBCtrl offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.TLBCtrl)));

    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest) == 0x400, ("guest offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest)));
    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved4) == 0x4A0, ("guest.u8Reserved4 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved4)));
    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved6) == 0x4D8, ("guest.u8Reserved6 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved6)));
    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved7) == 0x580, ("guest.u8Reserved7 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved7)));
    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved9) == 0x648, ("guest.u8Reserved9 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved9)));
    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, u8Reserved10) == 0x698, ("u8Reserved10 offset = %x\n", RT_OFFSETOF(SVM_VMCB, u8Reserved10)));
    AssertReleaseMsg(sizeof(SVM_VMCB) == 0x1000, ("SVM_VMCB size = %x\n", sizeof(SVM_VMCB)));
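    /* The offsets checked above follow the VMCB layout in AMD's programming
     * manual (control area fields below 0x400, guest state save area from
     * 0x400, one 4 KB page in total); a mismatch here indicates that
     * SVM_VMCB has drifted from the hardware-defined layout. */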


    /*
     * Register the saved state data unit.
     */
    int rc = SSMR3RegisterInternal(pVM, "HWACCM", 0, HWACCM_SSM_VERSION, sizeof(HWACCM),
                                   NULL, hwaccmR3Save, NULL,
                                   NULL, hwaccmR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

    /* Misc initialisation. */
    pVM->hwaccm.s.vmx.fSupported = false;
    pVM->hwaccm.s.svm.fSupported = false;
    pVM->hwaccm.s.vmx.fEnabled   = false;
    pVM->hwaccm.s.svm.fEnabled   = false;

    pVM->hwaccm.s.fActive        = false;
    pVM->hwaccm.s.fNestedPaging  = false;

    /* Disabled by default. */
    pVM->fHWACCMEnabled = false;

    /*
     * Check CFGM options.
     */
    PCFGMNODE pRoot      = CFGMR3GetRoot(pVM);
    PCFGMNODE pHWVirtExt = CFGMR3GetChild(pRoot, "HWVirtExt/");
    /* Nested paging: disabled by default. */
    rc = CFGMR3QueryBoolDef(pRoot, "EnableNestedPaging", &pVM->hwaccm.s.fAllowNestedPaging, false);
    AssertRC(rc);

    /* VT-x VPID: disabled by default. */
    rc = CFGMR3QueryBoolDef(pRoot, "EnableVPID", &pVM->hwaccm.s.vmx.fAllowVPID, false);
    AssertRC(rc);

    /* HWACCM support must be explicitly enabled in the configuration file. */
    rc = CFGMR3QueryBoolDef(pHWVirtExt, "Enabled", &pVM->hwaccm.s.fAllowed, false);
    AssertRC(rc);

#ifdef RT_OS_DARWIN
    if (VMMIsHwVirtExtForced(pVM) != pVM->hwaccm.s.fAllowed)
#else
    if (VMMIsHwVirtExtForced(pVM) && !pVM->hwaccm.s.fAllowed)
#endif
    {
        AssertLogRelMsgFailed(("VMMIsHwVirtExtForced=%RTbool fAllowed=%RTbool\n",
                               VMMIsHwVirtExtForced(pVM), pVM->hwaccm.s.fAllowed));
        return VERR_HWACCM_CONFIG_MISMATCH;
    }
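    /* Note the asymmetry above: on Darwin the hardware virtualization decision
     * is made globally (VMMIsHwVirtExtForced), so the CFGM setting must agree
     * with it exactly, while on other hosts it is only an error to force
     * HWVirtEx when the configuration disallows it. */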

    if (VMMIsHwVirtExtForced(pVM))
        pVM->fHWACCMEnabled = true;

#if HC_ARCH_BITS == 32
    /* 64-bit mode is configurable and it depends on both the kernel mode and VT-x.
     * (To use the default, don't set 64bitEnabled in CFGM.) */
    rc = CFGMR3QueryBoolDef(pHWVirtExt, "64bitEnabled", &pVM->hwaccm.s.fAllow64BitGuests, false);
    AssertLogRelRCReturn(rc, rc);
    if (pVM->hwaccm.s.fAllow64BitGuests)
    {
# ifdef RT_OS_DARWIN
        if (!VMMIsHwVirtExtForced(pVM))
# else
        if (!pVM->hwaccm.s.fAllowed)
# endif
            return VM_SET_ERROR(pVM, VERR_INVALID_PARAMETER, "64-bit guest support was requested without also enabling HWVirtEx (VT-x/AMD-V).");
    }
#else
    /* On 64-bit hosts 64-bit guest support is enabled by default, but allow this to be overridden
     * via VBoxInternal/HWVirtExt/64bitEnabled=0. (ConsoleImpl2.cpp doesn't set this to false for 64-bit.) */
    rc = CFGMR3QueryBoolDef(pHWVirtExt, "64bitEnabled", &pVM->hwaccm.s.fAllow64BitGuests, true);
    AssertLogRelRCReturn(rc, rc);
#endif
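    /* For reference, the override mentioned above maps to an extradata key; an
     * illustrative invocation (VM name assumed) would be:
     *   VBoxManage setextradata "MyVM" "VBoxInternal/HWVirtExt/64bitEnabled" 0
     * which CFGM exposes under the HWVirtExt node queried here. */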

    return VINF_SUCCESS;
}

/**
 * Initializes the per-VCPU HWACCM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) HWACCMR3InitCPU(PVM pVM)
{
    LogFlow(("HWACCMR3InitCPU\n"));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    for (unsigned i=0;i<pVM->cCPUs;i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        int rc;

        rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatEntry, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode entry",
                             "/PROF/HWACCM/CPU%d/SwitchToGC", i);
        AssertRC(rc);
        rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode exit part 1",
                             "/PROF/HWACCM/CPU%d/SwitchFromGC_1", i);
        AssertRC(rc);
        rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode exit part 2",
                             "/PROF/HWACCM/CPU%d/SwitchFromGC_2", i);
        AssertRC(rc);
# if 1 /* temporary for tracking down darwin holdup. */
        rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2Sub1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - I/O",
                             "/PROF/HWACCM/CPU%d/SwitchFromGC_2/Sub1", i);
        AssertRC(rc);
        rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2Sub2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - CRx RWs",
                             "/PROF/HWACCM/CPU%d/SwitchFromGC_2/Sub2", i);
        AssertRC(rc);
        rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2Sub3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - Exceptions",
                             "/PROF/HWACCM/CPU%d/SwitchFromGC_2/Sub3", i);
        AssertRC(rc);
# endif
        rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatInGC, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of vmlaunch",
                             "/PROF/HWACCM/CPU%d/InGC", i);
        AssertRC(rc);

# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatWorldSwitch3264, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of the 32/64 switcher",
                             "/PROF/HWACCM/CPU%d/Switcher3264", i);
        AssertRC(rc);
# endif

# define HWACCM_REG_COUNTER(a, b) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Profiling of vmlaunch", b, i); \
        AssertRC(rc);

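        /* Helper: registers one always-visible occurrence counter; 'a' is the
         * counter address and 'b' the STAM name format string into which the
         * CPU index 'i' from the enclosing loop is substituted. */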
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitShadowNM,          "/HWACCM/CPU%d/Exit/Trap/Shw/#NM");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestNM,           "/HWACCM/CPU%d/Exit/Trap/Gst/#NM");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitShadowPF,          "/HWACCM/CPU%d/Exit/Trap/Shw/#PF");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestPF,           "/HWACCM/CPU%d/Exit/Trap/Gst/#PF");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestUD,           "/HWACCM/CPU%d/Exit/Trap/Gst/#UD");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestSS,           "/HWACCM/CPU%d/Exit/Trap/Gst/#SS");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestNP,           "/HWACCM/CPU%d/Exit/Trap/Gst/#NP");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestGP,           "/HWACCM/CPU%d/Exit/Trap/Gst/#GP");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestMF,           "/HWACCM/CPU%d/Exit/Trap/Gst/#MF");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestDE,           "/HWACCM/CPU%d/Exit/Trap/Gst/#DE");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestDB,           "/HWACCM/CPU%d/Exit/Trap/Gst/#DB");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInvpg,             "/HWACCM/CPU%d/Exit/Instr/Invlpg");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInvd,              "/HWACCM/CPU%d/Exit/Instr/Invd");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCpuid,             "/HWACCM/CPU%d/Exit/Instr/Cpuid");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdtsc,             "/HWACCM/CPU%d/Exit/Instr/Rdtsc");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitDRxWrite,          "/HWACCM/CPU%d/Exit/Instr/DR/Write");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitDRxRead,           "/HWACCM/CPU%d/Exit/Instr/DR/Read");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCLTS,              "/HWACCM/CPU%d/Exit/Instr/CLTS");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitLMSW,              "/HWACCM/CPU%d/Exit/Instr/LMSW");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCli,               "/HWACCM/CPU%d/Exit/Instr/Cli");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitSti,               "/HWACCM/CPU%d/Exit/Instr/Sti");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPushf,             "/HWACCM/CPU%d/Exit/Instr/Pushf");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPopf,              "/HWACCM/CPU%d/Exit/Instr/Popf");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIret,              "/HWACCM/CPU%d/Exit/Instr/Iret");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInt,               "/HWACCM/CPU%d/Exit/Instr/Int");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitHlt,               "/HWACCM/CPU%d/Exit/Instr/Hlt");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOWrite,           "/HWACCM/CPU%d/Exit/IO/Write");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIORead,            "/HWACCM/CPU%d/Exit/IO/Read");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOStringWrite,     "/HWACCM/CPU%d/Exit/IO/WriteString");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOStringRead,      "/HWACCM/CPU%d/Exit/IO/ReadString");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIrqWindow,         "/HWACCM/CPU%d/Exit/IrqWindow");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMaxResume,         "/HWACCM/CPU%d/Exit/MaxResume");

        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatSwitchGuestIrq,        "/HWACCM/CPU%d/Switch/IrqPending");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatSwitchToR3,            "/HWACCM/CPU%d/Switch/ToR3");

        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatIntInject,             "/HWACCM/CPU%d/Irq/Inject");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatIntReinject,           "/HWACCM/CPU%d/Irq/Reinject");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatPendingHostIrq,        "/HWACCM/CPU%d/Irq/PendingOnHost");

        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPageManual,       "/HWACCM/CPU%d/Flush/Page/Virt");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPhysPageManual,   "/HWACCM/CPU%d/Flush/Page/Phys");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBManual,        "/HWACCM/CPU%d/Flush/TLB/Manual");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBCRxChange,     "/HWACCM/CPU%d/Flush/TLB/CRx");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPageInvlpg,       "/HWACCM/CPU%d/Flush/Page/Invlpg");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBWorldSwitch,   "/HWACCM/CPU%d/Flush/TLB/Switch");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatNoFlushTLBWorldSwitch, "/HWACCM/CPU%d/Flush/TLB/Skipped");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushASID,             "/HWACCM/CPU%d/Flush/TLB/ASID");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBInvlpga,       "/HWACCM/CPU%d/Flush/TLB/PhysInvl");

        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCOffset,             "/HWACCM/CPU%d/TSC/Offset");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCIntercept,          "/HWACCM/CPU%d/TSC/Intercept");

        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxArmed,              "/HWACCM/CPU%d/Debug/Armed");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxContextSwitch,      "/HWACCM/CPU%d/Debug/ContextSwitch");
        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxIOCheck,            "/HWACCM/CPU%d/Debug/IOCheck");

        for (unsigned j=0;j<RT_ELEMENTS(pVCpu->hwaccm.s.StatExitCRxWrite);j++)
        {
            rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitCRxWrite[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Profiling of CRx writes",
                                 "/HWACCM/CPU%d/Exit/Instr/CR/Write/%x", i, j);
            AssertRC(rc);
            rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitCRxRead[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Profiling of CRx reads",
                                 "/HWACCM/CPU%d/Exit/Instr/CR/Read/%x", i, j);
            AssertRC(rc);
        }

#undef HWACCM_REG_COUNTER

        pVCpu->hwaccm.s.paStatExitReason = NULL;

        rc = MMHyperAlloc(pVM, MAX_EXITREASON_STAT*sizeof(*pVCpu->hwaccm.s.paStatExitReason), 0, MM_TAG_HWACCM, (void **)&pVCpu->hwaccm.s.paStatExitReason);
        AssertRC(rc);
        if (RT_SUCCESS(rc))
        {
            const char * const *papszDesc = ASMIsIntelCpu() ? &g_apszVTxExitReasons[0] : &g_apszAmdVExitReasons[0];
            for (int j=0;j<MAX_EXITREASON_STAT;j++)
            {
                rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.paStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
                                     papszDesc[j] ? papszDesc[j] : "Exit reason",
                                     "/HWACCM/CPU%d/Exit/Reason/%02x", i, j);
                AssertRC(rc);
            }
            rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitReasonNPF, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Nested page fault", "/HWACCM/CPU%d/Exit/Reason/#NPF", i);
            AssertRC(rc);
        }
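        /* The exit reason array was allocated on the hypervisor heap so that
         * the ring-0 code can bump the counters directly; translate the
         * ring-3 address to its ring-0 mapping for that purpose. */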
        pVCpu->hwaccm.s.paStatExitReasonR0 = MMHyperR3ToR0(pVM, pVCpu->hwaccm.s.paStatExitReason);
# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        Assert(pVCpu->hwaccm.s.paStatExitReasonR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM));
# else
        Assert(pVCpu->hwaccm.s.paStatExitReasonR0 != NIL_RTR0PTR);
# endif
    }
#endif /* VBOX_WITH_STATISTICS */

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    /* Magic marker for searching in crash dumps. */
    for (unsigned i=0;i<pVM->cCPUs;i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
        strcpy((char *)pCache->aMagic, "VMCSCACHE Magic");
        pCache->uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
    }
#endif
    return VINF_SUCCESS;
}

/**
 * Turns off normal raw mode features.
 *
 * @param   pVM     The VM to operate on.
 */
static void hwaccmR3DisableRawMode(PVM pVM)
{
    /* Disable PATM & CSAM. */
    PATMR3AllowPatching(pVM, false);
    CSAMDisableScanning(pVM);

    /* Turn off IDT/LDT/GDT and TSS monitoring and syncing. */
    SELMR3DisableMonitoring(pVM);
    TRPMR3DisableMonitoring(pVM);

    /* The hidden selector registers are now valid. */
    CPUMSetHiddenSelRegsValid(pVM, true);

    /* Disable the switcher code (safety precaution). */
    VMMR3DisableSwitcher(pVM);

    /* Disable mapping of the hypervisor into the shadow page table. */
    PGMR3MappingsDisable(pVM);

    /* Disable the switcher. */
    VMMR3DisableSwitcher(pVM);

    /* Reinit the paging mode to force the new shadow mode. */
    PGMR3ChangeMode(pVM, PGMMODE_REAL);
}

/**
 * Initialize VT-x or AMD-V.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
VMMR3DECL(int) HWACCMR3InitFinalizeR0(PVM pVM)
{
    int rc;

    if (    !pVM->hwaccm.s.vmx.fSupported
        &&  !pVM->hwaccm.s.svm.fSupported)
    {
        LogRel(("HWACCM: No VT-x or AMD-V CPU extension found. Reason %Rrc\n", pVM->hwaccm.s.lLastError));
        LogRel(("HWACCM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));
        if (VMMIsHwVirtExtForced(pVM))
            return VM_SET_ERROR(pVM, VERR_VMX_NO_VMX, "VT-x is not available.");
        return VINF_SUCCESS;
    }

    if (!pVM->hwaccm.s.fAllowed)
        return VINF_SUCCESS;    /* nothing to do */

    /* Enable VT-x or AMD-V on all host CPUs. */
    rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_ENABLE, 0, NULL);
    if (RT_FAILURE(rc))
    {
        LogRel(("HWACCMR3InitFinalize: SUPCallVMMR0Ex VMMR0_DO_HWACC_ENABLE failed with %Rrc\n", rc));
        return rc;
    }
    Assert(!pVM->fHWACCMEnabled || VMMIsHwVirtExtForced(pVM));

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        Log(("pVM->hwaccm.s.vmx.fSupported = %d\n", pVM->hwaccm.s.vmx.fSupported));

        if (    pVM->hwaccm.s.fInitialized == false
            &&  pVM->hwaccm.s.vmx.msr.feature_ctrl != 0)
        {
            uint64_t val;
            RTGCPHYS GCPhys = 0;

            LogRel(("HWACCM: Host CR4=%08X\n", pVM->hwaccm.s.vmx.hostCR4));
            LogRel(("HWACCM: MSR_IA32_FEATURE_CONTROL = %RX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));
            LogRel(("HWACCM: MSR_IA32_VMX_BASIC_INFO = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_basic_info));
            LogRel(("HWACCM: VMCS id = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
            LogRel(("HWACCM: VMCS size = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
            LogRel(("HWACCM: VMCS physical address limit = %s\n", MSR_IA32_VMX_BASIC_INFO_VMCS_PHYS_WIDTH(pVM->hwaccm.s.vmx.msr.vmx_basic_info) ? "< 4 GB" : "None"));
            LogRel(("HWACCM: VMCS memory type = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_MEM_TYPE(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
            LogRel(("HWACCM: Dual monitor treatment = %d\n", MSR_IA32_VMX_BASIC_INFO_VMCS_DUAL_MON(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));

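            /* Decoding convention for the capability MSRs dumped below: the
             * 'allowed1' half holds the control bits the CPU permits to be 1,
             * while 'disallowed0' holds bits that may not be 0, i.e. features
             * the CPU forces on (the allowed 0-settings/1-settings encoding
             * from Intel's VMX documentation). */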
            LogRel(("HWACCM: MSR_IA32_VMX_PINBASED_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.u));
            val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.allowed1;
            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI\n"));
            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER\n"));
            val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;
            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER *must* be set\n"));

            LogRel(("HWACCM: MSR_IA32_VMX_PROCBASED_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.u));
            val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1;
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL\n"));

            val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL *must* be set\n"));

            if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
            {
                LogRel(("HWACCM: MSR_IA32_VMX_PROCBASED_CTLS2 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.u));
                val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
                    LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
                    LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_EPT\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT)
                    LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_X2APIC)
                    LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_X2APIC\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
                    LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_VPID\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
                    LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT\n"));

                val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0;
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
                    LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC *must* be set\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT)
                    LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT *must* be set\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_X2APIC)
                    LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_X2APIC *must* be set\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
                    LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_EPT *must* be set\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
                    LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_VPID *must* be set\n"));
                if (val & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
                    LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT *must* be set\n"));
            }

            LogRel(("HWACCM: MSR_IA32_VMX_ENTRY_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_entry.u));
            val = pVM->hwaccm.s.vmx.msr.vmx_entry.n.allowed1;
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
                LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE)
                LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM)
                LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON)
                LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR)
                LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR)
                LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR)
                LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR\n"));
            val = pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0;
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
                LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG *must* be set\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE)
                LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE *must* be set\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM)
                LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM *must* be set\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON)
                LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON *must* be set\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR)
                LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR *must* be set\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR)
                LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR *must* be set\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR)
                LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR *must* be set\n"));

            LogRel(("HWACCM: MSR_IA32_VMX_EXIT_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_exit.u));
            val = pVM->hwaccm.s.vmx.msr.vmx_exit.n.allowed1;
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG)
                LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64)
                LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ)
                LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR)
                LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR)
                LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR)
                LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR)
                LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
                LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER\n"));
            val = pVM->hwaccm.s.vmx.msr.vmx_exit.n.disallowed0;
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG)
                LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG *must* be set\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64)
                LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64 *must* be set\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ)
                LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ *must* be set\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR)
                LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR *must* be set\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR)
                LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR *must* be set\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR)
                LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR *must* be set\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR)
                LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR *must* be set\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
                LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER *must* be set\n"));

            if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps)
            {
                LogRel(("HWACCM: MSR_IA32_VMX_EPT_VPID_CAPS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_eptcaps));

                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_X_ONLY)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_RWX_X_ONLY\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_W_ONLY)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_RWX_W_ONLY\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_WX_ONLY)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_RWX_WX_ONLY\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_21_BITS)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_21_BITS\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_30_BITS)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_30_BITS\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_39_BITS)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_39_BITS\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_48_BITS)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_48_BITS\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_57_BITS)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_57_BITS\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_UC)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_UC\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WC)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WC\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WT)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WT\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WP)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WP\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WB)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WB\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_21_BITS)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_SP_21_BITS\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_30_BITS)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_SP_30_BITS\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_39_BITS)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_SP_39_BITS\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_48_BITS)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_SP_48_BITS\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_INDIV)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_INDIV\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_CONTEXT)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_CONTEXT\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL\n"));
                if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT_GLOBAL)
                    LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT_GLOBAL\n"));
            }

            LogRel(("HWACCM: MSR_IA32_VMX_MISC = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_misc));
            LogRel(("HWACCM: MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hwaccm.s.vmx.msr.vmx_misc)));
            LogRel(("HWACCM: MSR_IA32_VMX_MISC_ACTIVITY_STATES %x\n", MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hwaccm.s.vmx.msr.vmx_misc)));
            LogRel(("HWACCM: MSR_IA32_VMX_MISC_CR3_TARGET %x\n", MSR_IA32_VMX_MISC_CR3_TARGET(pVM->hwaccm.s.vmx.msr.vmx_misc)));
            LogRel(("HWACCM: MSR_IA32_VMX_MISC_MAX_MSR %x\n", MSR_IA32_VMX_MISC_MAX_MSR(pVM->hwaccm.s.vmx.msr.vmx_misc)));
            LogRel(("HWACCM: MSR_IA32_VMX_MISC_MSEG_ID %x\n", MSR_IA32_VMX_MISC_MSEG_ID(pVM->hwaccm.s.vmx.msr.vmx_misc)));

            LogRel(("HWACCM: MSR_IA32_VMX_CR0_FIXED0 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0));
            LogRel(("HWACCM: MSR_IA32_VMX_CR0_FIXED1 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1));
            LogRel(("HWACCM: MSR_IA32_VMX_CR4_FIXED0 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0));
            LogRel(("HWACCM: MSR_IA32_VMX_CR4_FIXED1 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1));
            LogRel(("HWACCM: MSR_IA32_VMX_VMCS_ENUM = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum));

            LogRel(("HWACCM: TPR shadow physaddr = %RHp\n", pVM->hwaccm.s.vmx.pAPICPhys));
            LogRel(("HWACCM: MSR bitmap physaddr = %RHp\n", pVM->hwaccm.s.vmx.pMSRBitmapPhys));

            for (unsigned i=0;i<pVM->cCPUs;i++)
                LogRel(("HWACCM: VMCS physaddr VCPU%d = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys));

#ifdef HWACCM_VTX_WITH_EPT
            if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
                pVM->hwaccm.s.fNestedPaging = pVM->hwaccm.s.fAllowNestedPaging;
#endif /* HWACCM_VTX_WITH_EPT */
#ifdef HWACCM_VTX_WITH_VPID
            if (    (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
                &&  !pVM->hwaccm.s.fNestedPaging)   /* VPID and EPT are mutually exclusive. */
                pVM->hwaccm.s.vmx.fVPID = pVM->hwaccm.s.vmx.fAllowVPID;
#endif /* HWACCM_VTX_WITH_VPID */

            /* Only try once. */
            pVM->hwaccm.s.fInitialized = true;

            /* Allocate three pages for the TSS we need for real mode emulation. (2 pages for the I/O bitmap) */
#if 1
            rc = PDMR3VMMDevHeapAlloc(pVM, HWACCM_VTX_TOTAL_DEVHEAP_MEM, (RTR3PTR *)&pVM->hwaccm.s.vmx.pRealModeTSS);
#else
            rc = VERR_NO_MEMORY; /* simulation of no VMMDev Heap. */
#endif
            if (RT_SUCCESS(rc))
            {
                /* The I/O bitmap starts right after the virtual interrupt redirection bitmap. */
                ASMMemZero32(pVM->hwaccm.s.vmx.pRealModeTSS, sizeof(*pVM->hwaccm.s.vmx.pRealModeTSS));
                pVM->hwaccm.s.vmx.pRealModeTSS->offIoBitmap = sizeof(*pVM->hwaccm.s.vmx.pRealModeTSS);
                /* Bit set to 0 means redirection enabled. */
                memset(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap, 0x0, sizeof(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap));
                /* Allow all port IO, so the VT-x IO intercepts do their job. */
                memset(pVM->hwaccm.s.vmx.pRealModeTSS + 1, 0, PAGE_SIZE*2);
                *((unsigned char *)pVM->hwaccm.s.vmx.pRealModeTSS + HWACCM_VTX_TSS_SIZE - 2) = 0xff;
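                /* The 0xff store above provides the terminator the CPU
                 * expects: the I/O permission bitmap must be followed by a
                 * byte with all bits set. */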

                /* Construct a 1024-entry page directory with 4 MB pages,
                 * identity mapping memory for use in real and protected mode
                 * without paging when running with EPT.
                 */
                pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable = (PX86PD)((char *)pVM->hwaccm.s.vmx.pRealModeTSS + PAGE_SIZE * 3);
                for (unsigned i=0;i<X86_PG_ENTRIES;i++)
                {
                    pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable->a[i].u  = _4M * i;
                    pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable->a[i].u |= X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_A | X86_PDE4M_D | X86_PDE4M_PS | X86_PDE4M_G;
                }
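                /* Illustrative effect of the loop above: entry 0 maps guest
                 * physical 0x00000000, entry 1 maps 0x00400000, and so on up
                 * to entry 1023 at 0xFFC00000 - a flat identity mapping of the
                 * first 4 GB, each entry present, writable, user, accessed,
                 * dirty, 4 MB and global. */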

                /* We convert it here every time as PCI regions could be reconfigured. */
                rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hwaccm.s.vmx.pRealModeTSS, &GCPhys);
                AssertRC(rc);
                LogRel(("HWACCM: Real Mode TSS guest physaddr = %RGp\n", GCPhys));

                rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
                AssertRC(rc);
                LogRel(("HWACCM: Non-Paging Mode EPT CR3 = %RGp\n", GCPhys));
            }
            else
            {
                LogRel(("HWACCM: No real mode VT-x support (PDMR3VMMDevHeapAlloc returned %Rrc)\n", rc));
                pVM->hwaccm.s.vmx.pRealModeTSS = NULL;
                pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable = NULL;
            }

            rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
            AssertRC(rc);
            if (rc == VINF_SUCCESS)
            {
                pVM->fHWACCMEnabled = true;
                pVM->hwaccm.s.vmx.fEnabled = true;
                hwaccmR3DisableRawMode(pVM);

                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
#ifdef VBOX_ENABLE_64_BITS_GUESTS
                if (pVM->hwaccm.s.fAllow64BitGuests)
                {
                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL); /* 64 bits only on Intel CPUs */
                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
                }
                LogRel((pVM->hwaccm.s.fAllow64BitGuests
                        ? "HWACCM: 32-bit and 64-bit guests supported.\n"
                        : "HWACCM: 32-bit guests supported.\n"));
#else
                LogRel(("HWACCM: 32-bit guests supported.\n"));
#endif
                LogRel(("HWACCM: VMX enabled!\n"));
                if (pVM->hwaccm.s.fNestedPaging)
                {
                    LogRel(("HWACCM: Enabled nested paging\n"));
                    LogRel(("HWACCM: EPT root page = %RHp\n", PGMGetHyperCR3(pVM)));
                }
                if (pVM->hwaccm.s.vmx.fVPID)
                    LogRel(("HWACCM: Enabled VPID\n"));

                if (    pVM->hwaccm.s.fNestedPaging
                    ||  pVM->hwaccm.s.vmx.fVPID)
                {
                    LogRel(("HWACCM: enmFlushPage    %d\n", pVM->hwaccm.s.vmx.enmFlushPage));
                    LogRel(("HWACCM: enmFlushContext %d\n", pVM->hwaccm.s.vmx.enmFlushContext));
                }
            }
            else
            {
                LogRel(("HWACCM: VMX setup failed with rc=%Rrc!\n", rc));
                LogRel(("HWACCM: Last instruction error %x\n", pVM->aCpus[0].hwaccm.s.vmx.lasterror.ulInstrError));
                pVM->fHWACCMEnabled = false;
            }
        }
    }
    else
    if (pVM->hwaccm.s.svm.fSupported)
    {
        Log(("pVM->hwaccm.s.svm.fSupported = %d\n", pVM->hwaccm.s.svm.fSupported));

        if (pVM->hwaccm.s.fInitialized == false)
        {
            /* Erratum 170 which requires a forced TLB flush for each world switch:
             * See http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/33610.pdf
             *
             * All BH-G1/2 and DH-G1/2 models include a fix:
             * Athlon X2:   0x6b 1/2
             *              0x68 1/2
             * Athlon 64:   0x7f 1
             *              0x6f 2
             * Sempron:     0x7f 1/2
             *              0x6f 2
             *              0x6c 2
             *              0x7c 2
             * Turion 64:   0x68 2
             *
             */
            uint32_t u32Dummy;
            uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
            ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
            u32BaseFamily = (u32Version >> 8) & 0xf;
            u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
            u32Model      = ((u32Version >> 4) & 0xf);
            u32Model      = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
            u32Stepping   = u32Version & 0xf;
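            /* Worked example of the decode above (hypothetical CPUID leaf 1
             * EAX value): u32Version = 0x00060FF2 yields base family 0xf with
             * extended family 0 => family 0xf, base model 0xf with extended
             * model 6 => model 0x6f, stepping 2 - a part the check below
             * treats as already containing the erratum 170 fix. */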
            if (    u32Family == 0xf
                &&  !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
                &&  !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
            {
                LogRel(("HWACCM: AMD cpu with erratum 170 family %x model %x stepping %x\n", u32Family, u32Model, u32Stepping));
            }

            LogRel(("HWACCM: cpuid 0x80000001.u32AMDFeatureECX = %RX32\n", pVM->hwaccm.s.cpuid.u32AMDFeatureECX));
            LogRel(("HWACCM: cpuid 0x80000001.u32AMDFeatureEDX = %RX32\n", pVM->hwaccm.s.cpuid.u32AMDFeatureEDX));
            LogRel(("HWACCM: AMD-V revision = %X\n", pVM->hwaccm.s.svm.u32Rev));
            LogRel(("HWACCM: AMD-V max ASID = %d\n", pVM->hwaccm.s.uMaxASID));
            LogRel(("HWACCM: AMD-V features = %X\n", pVM->hwaccm.s.svm.u32Features));

            if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
                LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING\n"));
            if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_LBR_VIRT)
                LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_LBR_VIRT\n"));
            if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_SVM_LOCK)
                LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_SVM_LOCK\n"));
            if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
                LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE\n"));
            if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_SSE_3_5_DISABLE)
                LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_SSE_3_5_DISABLE\n"));

            /* Only try once. */
            pVM->hwaccm.s.fInitialized = true;

            if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
                pVM->hwaccm.s.fNestedPaging = pVM->hwaccm.s.fAllowNestedPaging;

            rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
            AssertRC(rc);
            if (rc == VINF_SUCCESS)
            {
                pVM->fHWACCMEnabled = true;
                pVM->hwaccm.s.svm.fEnabled = true;

                if (pVM->hwaccm.s.fNestedPaging)
                    LogRel(("HWACCM: Enabled nested paging\n"));

                hwaccmR3DisableRawMode(pVM);
                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);
                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP);
#ifdef VBOX_ENABLE_64_BITS_GUESTS
                if (pVM->hwaccm.s.fAllow64BitGuests)
                {
                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
                }
#endif
                LogRel((pVM->hwaccm.s.fAllow64BitGuests
                        ? "HWACCM: 32-bit and 64-bit guests supported.\n"
                        : "HWACCM: 32-bit guests supported.\n"));
            }
            else
            {
                pVM->fHWACCMEnabled = false;
            }
        }
    }
    return VINF_SUCCESS;
}

/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * @param   pVM     The VM.
 */
VMMR3DECL(void) HWACCMR3Relocate(PVM pVM)
{
    Log(("HWACCMR3Relocate to %RGv\n", MMHyperGetArea(pVM, 0)));

    /* Fetch the current paging mode during the relocation callback issued while loading the saved state. */
    if (VMR3GetState(pVM) == VMSTATE_LOADING)
    {
        for (unsigned i=0;i<pVM->cCPUs;i++)
        {
            PVMCPU pVCpu = &pVM->aCpus[i];
            /** @todo SMP */
            pVCpu->hwaccm.s.enmShadowMode            = PGMGetShadowMode(pVM);
            pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode = PGMGetGuestMode(pVM);
        }
    }
#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    if (pVM->fHWACCMEnabled)
    {
        int rc;

        switch (PGMGetHostMode(pVM))
        {
            case PGMMODE_32_BIT:
                pVM->hwaccm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_32_TO_AMD64);
                break;

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
                pVM->hwaccm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_PAE_TO_AMD64);
                break;

            default:
                AssertFailed();
                break;
        }
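        /* The symbols resolved below live in the raw-mode context image;
         * together with the host-to-guest switcher selected above they appear
         * to implement the 64-bit world switch (running 64-bit guests and
         * saving their FPU/debug state) on behalf of a 32-bit host kernel. */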
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, "VMXGCStartVM64", &pVM->hwaccm.s.pfnVMXGCStartVM64);
        AssertReleaseMsgRC(rc, ("VMXGCStartVM64 -> rc=%Rrc\n", rc));

        rc = PDMR3LdrGetSymbolRC(pVM, NULL, "SVMGCVMRun64", &pVM->hwaccm.s.pfnSVMGCVMRun64);
        AssertReleaseMsgRC(rc, ("SVMGCVMRun64 -> rc=%Rrc\n", rc));

        rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HWACCMSaveGuestFPU64", &pVM->hwaccm.s.pfnSaveGuestFPU64);
        AssertReleaseMsgRC(rc, ("HWACCMSaveGuestFPU64 -> rc=%Rrc\n", rc));

        rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HWACCMSaveGuestDebug64", &pVM->hwaccm.s.pfnSaveGuestDebug64);
        AssertReleaseMsgRC(rc, ("HWACCMSaveGuestDebug64 -> rc=%Rrc\n", rc));

# ifdef DEBUG
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HWACCMTestSwitcher64", &pVM->hwaccm.s.pfnTest64);
        AssertReleaseMsgRC(rc, ("HWACCMTestSwitcher64 -> rc=%Rrc\n", rc));
# endif
    }
#endif
    return;
}

/**
 * Checks whether hardware accelerated raw mode is allowed.
 *
 * @returns boolean
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(bool) HWACCMR3IsAllowed(PVM pVM)
{
    return pVM->hwaccm.s.fAllowed;
}

/**
 * Notification callback which is called whenever there is a chance that a CR3
 * value might have changed.
 *
 * This is called by PGM.
 *
 * @param   pVM             The VM to operate on.
 * @param   enmShadowMode   New shadow paging mode.
 * @param   enmGuestMode    New guest paging mode.
 */
VMMR3DECL(void) HWACCMR3PagingModeChanged(PVM pVM, PGMMODE enmShadowMode, PGMMODE enmGuestMode)
{
    /* Ignore page mode changes during state loading. */
    if (VMR3GetState(pVM) == VMSTATE_LOADING)
        return;

    PVMCPU pVCpu = VMMGetCpu(pVM);
    pVCpu->hwaccm.s.enmShadowMode = enmShadowMode;

    if (    pVM->hwaccm.s.vmx.fEnabled
        &&  pVM->fHWACCMEnabled)
    {
        if (    pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
            &&  enmGuestMode >= PGMMODE_PROTECTED)
        {
            PCPUMCTX pCtx;

            pCtx = CPUMQueryGuestCtxPtr(pVM);

            /* After a real mode switch to protected mode we must force
             * CPL to 0. Our real mode emulation had to set it to 3.
             */
            pCtx->ssHid.Attr.n.u2Dpl = 0;
        }
    }

    if (pVCpu->hwaccm.s.vmx.enmCurrGuestMode != enmGuestMode)
    {
        /* Keep track of paging mode changes. */
        pVCpu->hwaccm.s.vmx.enmPrevGuestMode = pVCpu->hwaccm.s.vmx.enmCurrGuestMode;
        pVCpu->hwaccm.s.vmx.enmCurrGuestMode = enmGuestMode;

        /* Did we miss a change, because all code was executed in the recompiler? */
        if (pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == enmGuestMode)
        {
            Log(("HWACCMR3PagingModeChanged missed %s->%s transition (prev %s)\n", PGMGetModeName(pVCpu->hwaccm.s.vmx.enmPrevGuestMode), PGMGetModeName(pVCpu->hwaccm.s.vmx.enmCurrGuestMode), PGMGetModeName(pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode)));
            pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode = pVCpu->hwaccm.s.vmx.enmPrevGuestMode;
        }
    }

    /* Reset the contents of the read cache. */
    PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
    for (unsigned j=0;j<pCache->Read.cValidEntries;j++)
        pCache->Read.aFieldVal[j] = 0;
}

/**
 * Terminates the HWACCM.
 *
 * Termination means cleaning up and freeing all resources; the VM itself is
 * at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) HWACCMR3Term(PVM pVM)
{
    if (pVM->hwaccm.s.vmx.pRealModeTSS)
    {
        PDMR3VMMDevHeapFree(pVM, pVM->hwaccm.s.vmx.pRealModeTSS);
        pVM->hwaccm.s.vmx.pRealModeTSS = 0;
    }
    HWACCMR3TermCPU(pVM);
    return 0;
}
1120
1121/**
1122 * Terminates the per-VCPU HWACCM.
1123 *
1124 * Termination means cleaning up and freeing all resources;
1125 * the VM itself is at this point powered off or suspended.
1126 *
1127 * @returns VBox status code.
1128 * @param pVM The VM to operate on.
1129 */
1130VMMR3DECL(int) HWACCMR3TermCPU(PVM pVM)
1131{
1132 for (unsigned i=0;i<pVM->cCPUs;i++)
1133 {
1134 PVMCPU pVCpu = &pVM->aCpus[i];
1135
1136 if (pVCpu->hwaccm.s.paStatExitReason)
1137 {
1138 MMHyperFree(pVM, pVCpu->hwaccm.s.paStatExitReason);
1139 pVCpu->hwaccm.s.paStatExitReason = NULL;
1140 pVCpu->hwaccm.s.paStatExitReasonR0 = NIL_RTR0PTR;
1141 }
1142#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1143 memset(pVCpu->hwaccm.s.vmx.VMCSCache.aMagic, 0, sizeof(pVCpu->hwaccm.s.vmx.VMCSCache.aMagic));
1144 pVCpu->hwaccm.s.vmx.VMCSCache.uMagic = 0;
1145 pVCpu->hwaccm.s.vmx.VMCSCache.uPos = 0xffffffff;
1146#endif
1147 }
1148 return 0;
1149}
1150
1151/**
1152 * The VM is being reset.
1153 *
1154 * For the HWACCM component this means that any GDT/LDT/TSS monitors
1155 * need to be removed.
1156 *
1157 * @param pVM VM handle.
1158 */
1159VMMR3DECL(void) HWACCMR3Reset(PVM pVM)
1160{
1161 LogFlow(("HWACCMR3Reset:\n"));
1162
1163 if (pVM->fHWACCMEnabled)
1164 hwaccmR3DisableRawMode(pVM);
1165
1166 for (unsigned i=0;i<pVM->cCPUs;i++)
1167 {
1168 PVMCPU pVCpu = &pVM->aCpus[i];
1169
1170 /* On first entry we'll sync everything. */
1171 pVCpu->hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
1172
1173 pVCpu->hwaccm.s.vmx.cr0_mask = 0;
1174 pVCpu->hwaccm.s.vmx.cr4_mask = 0;
1175
1176 pVCpu->hwaccm.s.Event.fPending = false;
1177
1178 /* Reset state information for real-mode emulation in VT-x. */
1179 pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL;
1180 pVCpu->hwaccm.s.vmx.enmPrevGuestMode = PGMMODE_REAL;
1181 pVCpu->hwaccm.s.vmx.enmCurrGuestMode = PGMMODE_REAL;
1182
1183 /* Reset the contents of the read cache. */
1184 PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
1185 for (unsigned j=0;j<pCache->Read.cValidEntries;j++)
1186 pCache->Read.aFieldVal[j] = 0;
1187
1188#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1189 /* Magic marker for searching in crash dumps. */
1190 strcpy((char *)pCache->aMagic, "VMCSCACHE Magic");
1191 pCache->uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
1192#endif
1193 }
1194}
1195
1196/**
1197 * Forces execution of the current I/O code in the recompiler.
1198 *
1199 * @returns VBox status code.
1200 * @param pVM The VM to operate on.
1201 * @param pCtx Partial VM execution context
1202 */
1203VMMR3DECL(int) HWACCMR3EmulateIoBlock(PVM pVM, PCPUMCTX pCtx)
1204{
1205 PVMCPU pVCpu = VMMGetCpu(pVM);
1206
1207 Assert(pVM->fHWACCMEnabled);
1208 Log(("HWACCMR3EmulateIoBlock\n"));
1209
1210 /* This is primarily intended to speed up Grub, so we don't care about paged protected mode. */
1211 if (HWACCMCanEmulateIoBlockEx(pCtx))
1212 {
1213 Log(("HWACCMR3EmulateIoBlock -> enabled\n"));
1214 pVCpu->hwaccm.s.EmulateIoBlock.fEnabled = true;
1215 pVCpu->hwaccm.s.EmulateIoBlock.GCPtrFunctionEip = pCtx->rip;
1216 pVCpu->hwaccm.s.EmulateIoBlock.cr0 = pCtx->cr0;
1217 return VINF_EM_RESCHEDULE_REM;
1218 }
1219 return VINF_SUCCESS;
1220}
1221
1222/**
1223 * Checks if we can currently use hardware accelerated raw mode.
1224 *
1225 * @returns boolean
1226 * @param pVM The VM to operate on.
1227 * @param pCtx Partial VM execution context
1228 */
1229VMMR3DECL(bool) HWACCMR3CanExecuteGuest(PVM pVM, PCPUMCTX pCtx)
1230{
1231 PVMCPU pVCpu = VMMGetCpu(pVM);
1232
1233 Assert(pVM->fHWACCMEnabled);
1234
1235 /* If we're still executing the IO code, then return false. */
1236 if ( RT_UNLIKELY(pVCpu->hwaccm.s.EmulateIoBlock.fEnabled)
1237 && pCtx->rip < pVCpu->hwaccm.s.EmulateIoBlock.GCPtrFunctionEip + 0x200
1238 && pCtx->rip > pVCpu->hwaccm.s.EmulateIoBlock.GCPtrFunctionEip - 0x200
1239 && pCtx->cr0 == pVCpu->hwaccm.s.EmulateIoBlock.cr0)
1240 return false;
1241
1242 pVCpu->hwaccm.s.EmulateIoBlock.fEnabled = false;
1243
1244 /* AMD-V supports real & protected mode with or without paging. */
1245 if (pVM->hwaccm.s.svm.fEnabled)
1246 {
1247 pVM->hwaccm.s.fActive = true;
1248 return true;
1249 }
1250
1251 pVM->hwaccm.s.fActive = false;
1252
1253 /* Note! The context supplied by REM is partial. If we add more checks here, be sure to verify that REM provides this info! */
1254#ifdef HWACCM_VMX_EMULATE_REALMODE
1255 if (pVM->hwaccm.s.vmx.pRealModeTSS)
1256 {
1257 if (CPUMIsGuestInRealModeEx(pCtx))
1258 {
1259 /* VT-x will not allow high selector bases in v86 mode; fall back to the recompiler in that case.
1260 * The base must also be equal to (sel << 4).
1261 */
1262 if ( ( pCtx->cs != (pCtx->csHid.u64Base >> 4)
1263 && pCtx->csHid.u64Base != 0xffff0000 /* we can deal with the BIOS code as it's also mapped into the lower region. */)
1264 || pCtx->ds != (pCtx->dsHid.u64Base >> 4)
1265 || pCtx->es != (pCtx->esHid.u64Base >> 4)
1266 || pCtx->fs != (pCtx->fsHid.u64Base >> 4)
1267 || pCtx->gs != (pCtx->gsHid.u64Base >> 4)
1268 || pCtx->ss != (pCtx->ssHid.u64Base >> 4))
1269 {
1270 return false;
1271 }
1272 }
1273 else
1274 {
1275 PGMMODE enmGuestMode = PGMGetGuestMode(pVM);
1276 /* Verify the requirements for executing code in protected mode. VT-x can't handle the CPU state right after a switch
1277 * from real to protected mode. (all sorts of RPL & DPL assumptions)
1278 */
1279 PVMCPU pVCpu = VMMGetCpu(pVM);
1280
1281 if ( pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
1282 && enmGuestMode >= PGMMODE_PROTECTED)
1283 {
1284 if ( (pCtx->cs & X86_SEL_RPL)
1285 || (pCtx->ds & X86_SEL_RPL)
1286 || (pCtx->es & X86_SEL_RPL)
1287 || (pCtx->fs & X86_SEL_RPL)
1288 || (pCtx->gs & X86_SEL_RPL)
1289 || (pCtx->ss & X86_SEL_RPL))
1290 {
1291 return false;
1292 }
1293 }
1294 }
1295 }
1296 else
1297#endif /* HWACCM_VMX_EMULATE_REALMODE */
1298 {
1299 if (!CPUMIsGuestInLongModeEx(pCtx))
1300 {
1301 /** @todo This should (probably) be set on every excursion to the REM,
1302 * however it's too risky right now. So, only apply it when we go
1303 * back to REM for real mode execution. (The XP hack below doesn't
1304 * work reliably without this.)
1305 * Update: Implemented in EM.cpp, see #ifdef EM_NOTIFY_HWACCM. */
1306 pVM->aCpus[0].hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;
1307
1308 /* Too early for VT-x; Solaris guests will fail with a guru meditation otherwise; same for XP. */
1309 if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr == 0)
1310 return false;
1311
1312 /* The guest is about to complete the switch to protected mode. Wait a bit longer. */
1313 /* Windows XP; switch to protected mode; all selectors are marked not present in the
1314 * hidden registers (possible recompiler bug; see load_seg_vm) */
1315 if (pCtx->csHid.Attr.n.u1Present == 0)
1316 return false;
1317 if (pCtx->ssHid.Attr.n.u1Present == 0)
1318 return false;
1319
1320 /* Windows XP: possibly the same as above, but the new recompiler requires new heuristics?
1321 VT-x doesn't seem to like something about the guest state and this stuff avoids it. */
1322 /** @todo This check is actually wrong, it doesn't take the direction of the
1323 * stack segment into account. But, it does the job for now. */
1324 if (pCtx->rsp >= pCtx->ssHid.u32Limit)
1325 return false;
1326#if 0
1327 if ( pCtx->cs >= pCtx->gdtr.cbGdt
1328 || pCtx->ss >= pCtx->gdtr.cbGdt
1329 || pCtx->ds >= pCtx->gdtr.cbGdt
1330 || pCtx->es >= pCtx->gdtr.cbGdt
1331 || pCtx->fs >= pCtx->gdtr.cbGdt
1332 || pCtx->gs >= pCtx->gdtr.cbGdt)
1333 return false;
1334#endif
1335 }
1336 }
1337
1338 if (pVM->hwaccm.s.vmx.fEnabled)
1339 {
1340 uint32_t mask;
1341
1342 /* if bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
1343 mask = (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0;
1344 /* Note: We ignore the NE bit here on purpose; see vmmr0\hwaccmr0.cpp for details. */
1345 mask &= ~X86_CR0_NE;
1346
1347#ifdef HWACCM_VMX_EMULATE_REALMODE
1348 if (pVM->hwaccm.s.vmx.pRealModeTSS)
1349 {
1350 /* Note: We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
1351 mask &= ~(X86_CR0_PG|X86_CR0_PE);
1352 }
1353 else
1354#endif
1355 {
1356 /* We support protected mode without paging using identity mapping. */
1357 mask &= ~X86_CR0_PG;
1358 }
1359 if ((pCtx->cr0 & mask) != mask)
1360 return false;
1361
1362 /* if bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
1363 mask = (uint32_t)~pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1;
1364 if ((pCtx->cr0 & mask) != 0)
1365 return false;
1366
1367 /* if bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
1368 mask = (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0;
1369 mask &= ~X86_CR4_VMXE;
1370 if ((pCtx->cr4 & mask) != mask)
1371 return false;
1372
1373 /* if bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
1374 mask = (uint32_t)~pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1;
1375 if ((pCtx->cr4 & mask) != 0)
1376 return false;
1377
1378 pVM->hwaccm.s.fActive = true;
1379 return true;
1380 }
1381
1382 return false;
1383}
1384
1385/**
1386 * Notification from EM about rescheduling into hardware assisted execution
1387 * mode.
1388 *
1389 * @param pVCpu Pointer to the current virtual cpu structure.
1390 */
1391VMMR3DECL(void) HWACCMR3NotifyScheduled(PVMCPU pVCpu)
1392{
1393 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;
1394}
1395
1396/**
1397 * Notification from EM about returning from instruction emulation (REM / EM).
1398 *
1399 * @param pVCpu Pointer to the current virtual cpu structure.
1400 */
1401VMMR3DECL(void) HWACCMR3NotifyEmulated(PVMCPU pVCpu)
1402{
1403 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;
1404}
1405
1406/**
1407 * Checks if we are currently using hardware accelerated raw mode.
1408 *
1409 * @returns boolean
1410 * @param pVM The VM to operate on.
1411 */
1412VMMR3DECL(bool) HWACCMR3IsActive(PVM pVM)
1413{
1414 return pVM->hwaccm.s.fActive;
1415}
1416
1417/**
1418 * Checks if we are currently using nested paging.
1419 *
1420 * @returns boolean
1421 * @param pVM The VM to operate on.
1422 */
1423VMMR3DECL(bool) HWACCMR3IsNestedPagingActive(PVM pVM)
1424{
1425 return pVM->hwaccm.s.fNestedPaging;
1426}
1427
1428/**
1429 * Checks if we are currently using VPID in VT-x mode.
1430 *
1431 * @returns boolean
1432 * @param pVM The VM to operate on.
1433 */
1434VMMR3DECL(bool) HWACCMR3IsVPIDActive(PVM pVM)
1435{
1436 return pVM->hwaccm.s.vmx.fVPID;
1437}
1438
1439
1440/**
1441 * Checks if internal events are pending. In that case we are not allowed to dispatch interrupts.
1442 *
1443 * @returns boolean
1444 * @param pVM The VM to operate on.
1445 */
1446VMMR3DECL(bool) HWACCMR3IsEventPending(PVM pVM)
1447{
1448 /** @todo SMP */
1449 return HWACCMIsEnabled(pVM) && pVM->aCpus[0].hwaccm.s.Event.fPending;
1450}
1451
1452
1453/**
1454 * Injects an NMI into a running VM.
1455 *
1456 * @returns VBox status code.
1457 * @param pVM The VM to operate on.
1458 */
1459VMMR3DECL(int) HWACCMR3InjectNMI(PVM pVM)
1460{
1461 pVM->hwaccm.s.fInjectNMI = true;
1462 return VINF_SUCCESS;
1463}
1464
1465/**
1466 * Checks for a fatal VT-x/AMD-V error and produces a meaningful
1467 * release log message.
1468 *
1469 * @param pVM The VM to operate on.
1470 * @param iStatusCode VBox status code
1471 */
1472VMMR3DECL(void) HWACCMR3CheckError(PVM pVM, int iStatusCode)
1473{
1474 for (unsigned i=0;i<pVM->cCPUs;i++)
1475 {
1476 switch(iStatusCode)
1477 {
1478 case VERR_VMX_INVALID_VMCS_FIELD:
1479 break;
1480
1481 case VERR_VMX_INVALID_VMCS_PTR:
1482 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current pointer %RGp vs %RGp\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.u64VMCSPhys, pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys));
1483 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current VMCS version %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulVMCSRevision));
1484 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Entered Cpu %d\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.idEnteredCpu));
1485 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current Cpu %d\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.idCurrentCpu));
1486 break;
1487
1488 case VERR_VMX_UNABLE_TO_START_VM:
1489 LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError));
1490 LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulExitReason));
1491#if 0 /* @todo dump the current control fields to the release log */
1492 if (pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError == VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS)
1493 {
1494
1495 }
1496#endif
1497 break;
1498
1499 case VERR_VMX_UNABLE_TO_RESUME_VM:
1500 LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError));
1501 LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulExitReason));
1502 break;
1503
1504 case VERR_VMX_INVALID_VMXON_PTR:
1505 break;
1506 }
1507 }
1508}
1509
1510/**
1511 * Execute state save operation.
1512 *
1513 * @returns VBox status code.
1514 * @param pVM VM Handle.
1515 * @param pSSM SSM operation handle.
1516 */
1517static DECLCALLBACK(int) hwaccmR3Save(PVM pVM, PSSMHANDLE pSSM)
1518{
1519 int rc;
1520
1521 Log(("hwaccmR3Save:\n"));
1522
1523 for (unsigned i=0;i<pVM->cCPUs;i++)
1524 {
1525 /*
1526 * Save the basic bits - fortunately all the other things can be resynced on load.
1527 */
1528 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.Event.fPending);
1529 AssertRCReturn(rc, rc);
1530 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.Event.errCode);
1531 AssertRCReturn(rc, rc);
1532 rc = SSMR3PutU64(pSSM, pVM->aCpus[i].hwaccm.s.Event.intInfo);
1533 AssertRCReturn(rc, rc);
1534
1535 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmLastSeenGuestMode);
1536 AssertRCReturn(rc, rc);
1537 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmCurrGuestMode);
1538 AssertRCReturn(rc, rc);
1539 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmPrevGuestMode);
1540 AssertRCReturn(rc, rc);
1541 }
1542
1543 return VINF_SUCCESS;
1544}
1545
1546/**
1547 * Execute state load operation.
1548 *
1549 * @returns VBox status code.
1550 * @param pVM VM Handle.
1551 * @param pSSM SSM operation handle.
1552 * @param u32Version Data layout version.
1553 */
1554static DECLCALLBACK(int) hwaccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
1555{
1556 int rc;
1557
1558 Log(("hwaccmR3Load:\n"));
1559
1560 /*
1561 * Validate version.
1562 */
1563 if ( u32Version != HWACCM_SSM_VERSION
1564 && u32Version != HWACCM_SSM_VERSION_2_0_X)
1565 {
1566 AssertMsgFailed(("hwaccmR3Load: Invalid version u32Version=%d!\n", u32Version));
1567 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1568 }
1569 for (unsigned i=0;i<pVM->cCPUs;i++)
1570 {
1571 rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hwaccm.s.Event.fPending);
1572 AssertRCReturn(rc, rc);
1573 rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hwaccm.s.Event.errCode);
1574 AssertRCReturn(rc, rc);
1575 rc = SSMR3GetU64(pSSM, &pVM->aCpus[i].hwaccm.s.Event.intInfo);
1576 AssertRCReturn(rc, rc);
1577
1578 if (u32Version >= HWACCM_SSM_VERSION)
1579 {
1580 uint32_t val;
1581
1582 rc = SSMR3GetU32(pSSM, &val);
1583 AssertRCReturn(rc, rc);
1584 pVM->aCpus[i].hwaccm.s.vmx.enmLastSeenGuestMode = (PGMMODE)val;
1585
1586 rc = SSMR3GetU32(pSSM, &val);
1587 AssertRCReturn(rc, rc);
1588 pVM->aCpus[i].hwaccm.s.vmx.enmCurrGuestMode = (PGMMODE)val;
1589
1590 rc = SSMR3GetU32(pSSM, &val);
1591 AssertRCReturn(rc, rc);
1592 pVM->aCpus[i].hwaccm.s.vmx.enmPrevGuestMode = (PGMMODE)val;
1593 }
1594 }
1595 return VINF_SUCCESS;
1596}
1597