VirtualBox

source: vbox/trunk/src/VBox/VMM/HWACCM.cpp@ 26146

Last change on this file since 26146 was 26146, checked in by vboxsync, 15 years ago

Make sure we fall back to the recompiler if the VMM device heap is inactive (VT-x real mode or VT-x+EPT protected mode without paging)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 129.3 KB
1/* $Id: HWACCM.cpp 26146 2010-02-02 13:51:26Z vboxsync $ */
2/** @file
3 * HWACCM - Intel/AMD VM Hardware Support Manager
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_HWACCM
26#include <VBox/cpum.h>
27#include <VBox/stam.h>
28#include <VBox/mm.h>
29#include <VBox/pdm.h>
30#include <VBox/pgm.h>
31#include <VBox/trpm.h>
32#include <VBox/dbgf.h>
33#include <VBox/patm.h>
34#include <VBox/csam.h>
35#include <VBox/selm.h>
36#include <VBox/rem.h>
37#include <VBox/hwacc_vmx.h>
38#include <VBox/hwacc_svm.h>
39#include "HWACCMInternal.h"
40#include <VBox/vm.h>
41#include <VBox/err.h>
42#include <VBox/param.h>
43
44#include <iprt/assert.h>
45#include <VBox/log.h>
46#include <iprt/asm.h>
47#include <iprt/string.h>
48#include <iprt/env.h>
49#include <iprt/thread.h>
50
51/*******************************************************************************
52* Global Variables *
53*******************************************************************************/
54#ifdef VBOX_WITH_STATISTICS
55# define EXIT_REASON(def, val, str) #def " - " #val " - " str
56# define EXIT_REASON_NIL() NULL
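/* Note (descriptive, derived from the macro above): EXIT_REASON() only stringizes its first two
   arguments, so e.g. EXIT_REASON(VMX_EXIT_HLT, 12, "Guest software attempted to execute HLT.")
   expands via string concatenation to the single literal
   "VMX_EXIT_HLT - 12 - Guest software attempted to execute HLT.", which is what is later used as
   the STAM description for that exit-reason counter. */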
57/** Exit reason descriptions for VT-x, used to describe statistics. */
58static const char * const g_apszVTxExitReasons[MAX_EXITREASON_STAT] =
59{
60 EXIT_REASON(VMX_EXIT_EXCEPTION , 0, "Exception or non-maskable interrupt (NMI)."),
61 EXIT_REASON(VMX_EXIT_EXTERNAL_IRQ , 1, "External interrupt."),
62 EXIT_REASON(VMX_EXIT_TRIPLE_FAULT , 2, "Triple fault."),
63 EXIT_REASON(VMX_EXIT_INIT_SIGNAL , 3, "INIT signal."),
64 EXIT_REASON(VMX_EXIT_SIPI , 4, "Start-up IPI (SIPI)."),
65 EXIT_REASON(VMX_EXIT_IO_SMI_IRQ , 5, "I/O system-management interrupt (SMI)."),
66 EXIT_REASON(VMX_EXIT_SMI_IRQ , 6, "Other SMI."),
67 EXIT_REASON(VMX_EXIT_IRQ_WINDOW , 7, "Interrupt window."),
68 EXIT_REASON_NIL(),
69 EXIT_REASON(VMX_EXIT_TASK_SWITCH , 9, "Task switch."),
70 EXIT_REASON(VMX_EXIT_CPUID , 10, "Guest software attempted to execute CPUID."),
71 EXIT_REASON_NIL(),
72 EXIT_REASON(VMX_EXIT_HLT , 12, "Guest software attempted to execute HLT."),
73 EXIT_REASON(VMX_EXIT_INVD , 13, "Guest software attempted to execute INVD."),
74 EXIT_REASON(VMX_EXIT_INVPG , 14, "Guest software attempted to execute INVLPG."),
75 EXIT_REASON(VMX_EXIT_RDPMC , 15, "Guest software attempted to execute RDPMC."),
76 EXIT_REASON(VMX_EXIT_RDTSC , 16, "Guest software attempted to execute RDTSC."),
77 EXIT_REASON(VMX_EXIT_RSM , 17, "Guest software attempted to execute RSM in SMM."),
78 EXIT_REASON(VMX_EXIT_VMCALL , 18, "Guest software executed VMCALL."),
79 EXIT_REASON(VMX_EXIT_VMCLEAR , 19, "Guest software executed VMCLEAR."),
80 EXIT_REASON(VMX_EXIT_VMLAUNCH , 20, "Guest software executed VMLAUNCH."),
81 EXIT_REASON(VMX_EXIT_VMPTRLD , 21, "Guest software executed VMPTRLD."),
82 EXIT_REASON(VMX_EXIT_VMPTRST , 22, "Guest software executed VMPTRST."),
83 EXIT_REASON(VMX_EXIT_VMREAD , 23, "Guest software executed VMREAD."),
84 EXIT_REASON(VMX_EXIT_VMRESUME , 24, "Guest software executed VMRESUME."),
85 EXIT_REASON(VMX_EXIT_VMWRITE , 25, "Guest software executed VMWRITE."),
86 EXIT_REASON(VMX_EXIT_VMXOFF , 26, "Guest software executed VMXOFF."),
87 EXIT_REASON(VMX_EXIT_VMXON , 27, "Guest software executed VMXON."),
88 EXIT_REASON(VMX_EXIT_CRX_MOVE , 28, "Control-register accesses."),
89 EXIT_REASON(VMX_EXIT_DRX_MOVE , 29, "Debug-register accesses."),
90 EXIT_REASON(VMX_EXIT_PORT_IO , 30, "I/O instruction."),
91 EXIT_REASON(VMX_EXIT_RDMSR , 31, "RDMSR. Guest software attempted to execute RDMSR."),
92 EXIT_REASON(VMX_EXIT_WRMSR , 32, "WRMSR. Guest software attempted to execute WRMSR."),
93 EXIT_REASON(VMX_EXIT_ERR_INVALID_GUEST_STATE, 33, "VM-entry failure due to invalid guest state."),
94 EXIT_REASON(VMX_EXIT_ERR_MSR_LOAD , 34, "VM-entry failure due to MSR loading."),
95 EXIT_REASON_NIL(),
96 EXIT_REASON(VMX_EXIT_MWAIT , 36, "Guest software executed MWAIT."),
97 EXIT_REASON_NIL(),
98 EXIT_REASON_NIL(),
99 EXIT_REASON(VMX_EXIT_MONITOR , 39, "Guest software attempted to execute MONITOR."),
100 EXIT_REASON(VMX_EXIT_PAUSE , 40, "Guest software attempted to execute PAUSE."),
101 EXIT_REASON(VMX_EXIT_ERR_MACHINE_CHECK , 41, "VM-entry failure due to machine-check."),
102 EXIT_REASON_NIL(),
103 EXIT_REASON(VMX_EXIT_TPR , 43, "TPR below threshold. Guest software executed MOV to CR8."),
104 EXIT_REASON(VMX_EXIT_APIC_ACCESS , 44, "APIC access. Guest software attempted to access memory at a physical address on the APIC-access page."),
105 EXIT_REASON_NIL(),
106 EXIT_REASON(VMX_EXIT_XDTR_ACCESS , 46, "Access to GDTR or IDTR. Guest software attempted to execute LGDT, LIDT, SGDT, or SIDT."),
107 EXIT_REASON(VMX_EXIT_TR_ACCESS , 47, "Access to LDTR or TR. Guest software attempted to execute LLDT, LTR, SLDT, or STR."),
108 EXIT_REASON(VMX_EXIT_EPT_VIOLATION , 48, "EPT violation. An attempt to access memory with a guest-physical address was disallowed by the configuration of the EPT paging structures."),
109 EXIT_REASON(VMX_EXIT_EPT_MISCONFIG , 49, "EPT misconfiguration. An attempt to access memory with a guest-physical address encountered a misconfigured EPT paging-structure entry."),
110 EXIT_REASON(VMX_EXIT_INVEPT , 50, "INVEPT. Guest software attempted to execute INVEPT."),
111 EXIT_REASON_NIL(),
112 EXIT_REASON(VMX_EXIT_PREEMPTION_TIMER , 52, "VMX-preemption timer expired. The preemption timer counted down to zero."),
113 EXIT_REASON(VMX_EXIT_INVVPID , 53, "INVVPID. Guest software attempted to execute INVVPID."),
114 EXIT_REASON(VMX_EXIT_WBINVD , 54, "WBINVD. Guest software attempted to execute WBINVD."),
115 EXIT_REASON(VMX_EXIT_XSETBV , 55, "XSETBV. Guest software attempted to execute XSETBV."),
116 EXIT_REASON_NIL()
117};
118/** Exit reason descriptions for AMD-V, used to describe statistics. */
119static const char * const g_apszAmdVExitReasons[MAX_EXITREASON_STAT] =
120{
121 EXIT_REASON(SVM_EXIT_READ_CR0 , 0, "Read CR0."),
122 EXIT_REASON(SVM_EXIT_READ_CR1 , 1, "Read CR1."),
123 EXIT_REASON(SVM_EXIT_READ_CR2 , 2, "Read CR2."),
124 EXIT_REASON(SVM_EXIT_READ_CR3 , 3, "Read CR3."),
125 EXIT_REASON(SVM_EXIT_READ_CR4 , 4, "Read CR4."),
126 EXIT_REASON(SVM_EXIT_READ_CR5 , 5, "Read CR5."),
127 EXIT_REASON(SVM_EXIT_READ_CR6 , 6, "Read CR6."),
128 EXIT_REASON(SVM_EXIT_READ_CR7 , 7, "Read CR7."),
129 EXIT_REASON(SVM_EXIT_READ_CR8 , 8, "Read CR8."),
130 EXIT_REASON(SVM_EXIT_READ_CR9 , 9, "Read CR9."),
131 EXIT_REASON(SVM_EXIT_READ_CR10 , 10, "Read CR10."),
132 EXIT_REASON(SVM_EXIT_READ_CR11 , 11, "Read CR11."),
133 EXIT_REASON(SVM_EXIT_READ_CR12 , 12, "Read CR12."),
134 EXIT_REASON(SVM_EXIT_READ_CR13 , 13, "Read CR13."),
135 EXIT_REASON(SVM_EXIT_READ_CR14 , 14, "Read CR14."),
136 EXIT_REASON(SVM_EXIT_READ_CR15 , 15, "Read CR15."),
137 EXIT_REASON(SVM_EXIT_WRITE_CR0 , 16, "Write CR0."),
138 EXIT_REASON(SVM_EXIT_WRITE_CR1 , 17, "Write CR1."),
139 EXIT_REASON(SVM_EXIT_WRITE_CR2 , 18, "Write CR2."),
140 EXIT_REASON(SVM_EXIT_WRITE_CR3 , 19, "Write CR3."),
141 EXIT_REASON(SVM_EXIT_WRITE_CR4 , 20, "Write CR4."),
142 EXIT_REASON(SVM_EXIT_WRITE_CR5 , 21, "Write CR5."),
143 EXIT_REASON(SVM_EXIT_WRITE_CR6 , 22, "Write CR6."),
144 EXIT_REASON(SVM_EXIT_WRITE_CR7 , 23, "Write CR7."),
145 EXIT_REASON(SVM_EXIT_WRITE_CR8 , 24, "Write CR8."),
146 EXIT_REASON(SVM_EXIT_WRITE_CR9 , 25, "Write CR9."),
147 EXIT_REASON(SVM_EXIT_WRITE_CR10 , 26, "Write CR10."),
148 EXIT_REASON(SVM_EXIT_WRITE_CR11 , 27, "Write CR11."),
149 EXIT_REASON(SVM_EXIT_WRITE_CR12 , 28, "Write CR12."),
150 EXIT_REASON(SVM_EXIT_WRITE_CR13 , 29, "Write CR13."),
151 EXIT_REASON(SVM_EXIT_WRITE_CR14 , 30, "Write CR14."),
152 EXIT_REASON(SVM_EXIT_WRITE_CR15 , 31, "Write CR15."),
153 EXIT_REASON(SVM_EXIT_READ_DR0 , 32, "Read DR0."),
154 EXIT_REASON(SVM_EXIT_READ_DR1 , 33, "Read DR1."),
155 EXIT_REASON(SVM_EXIT_READ_DR2 , 34, "Read DR2."),
156 EXIT_REASON(SVM_EXIT_READ_DR3 , 35, "Read DR3."),
157 EXIT_REASON(SVM_EXIT_READ_DR4 , 36, "Read DR4."),
158 EXIT_REASON(SVM_EXIT_READ_DR5 , 37, "Read DR5."),
159 EXIT_REASON(SVM_EXIT_READ_DR6 , 38, "Read DR6."),
160 EXIT_REASON(SVM_EXIT_READ_DR7 , 39, "Read DR7."),
161 EXIT_REASON(SVM_EXIT_READ_DR8 , 40, "Read DR8."),
162 EXIT_REASON(SVM_EXIT_READ_DR9 , 41, "Read DR9."),
163 EXIT_REASON(SVM_EXIT_READ_DR10 , 42, "Read DR10."),
164 EXIT_REASON(SVM_EXIT_READ_DR11 , 43, "Read DR11."),
165 EXIT_REASON(SVM_EXIT_READ_DR12 , 44, "Read DR12."),
166 EXIT_REASON(SVM_EXIT_READ_DR13 , 45, "Read DR13."),
167 EXIT_REASON(SVM_EXIT_READ_DR14 , 46, "Read DR14."),
168 EXIT_REASON(SVM_EXIT_READ_DR15 , 47, "Read DR15."),
169 EXIT_REASON(SVM_EXIT_WRITE_DR0 , 48, "Write DR0."),
170 EXIT_REASON(SVM_EXIT_WRITE_DR1 , 49, "Write DR1."),
171 EXIT_REASON(SVM_EXIT_WRITE_DR2 , 50, "Write DR2."),
172 EXIT_REASON(SVM_EXIT_WRITE_DR3 , 51, "Write DR3."),
173 EXIT_REASON(SVM_EXIT_WRITE_DR4 , 52, "Write DR4."),
174 EXIT_REASON(SVM_EXIT_WRITE_DR5 , 53, "Write DR5."),
175 EXIT_REASON(SVM_EXIT_WRITE_DR6 , 54, "Write DR6."),
176 EXIT_REASON(SVM_EXIT_WRITE_DR7 , 55, "Write DR7."),
177 EXIT_REASON(SVM_EXIT_WRITE_DR8 , 56, "Write DR8."),
178 EXIT_REASON(SVM_EXIT_WRITE_DR9 , 57, "Write DR9."),
179 EXIT_REASON(SVM_EXIT_WRITE_DR10 , 58, "Write DR10."),
180 EXIT_REASON(SVM_EXIT_WRITE_DR11 , 59, "Write DR11."),
181 EXIT_REASON(SVM_EXIT_WRITE_DR12 , 60, "Write DR12."),
182 EXIT_REASON(SVM_EXIT_WRITE_DR13 , 61, "Write DR13."),
183 EXIT_REASON(SVM_EXIT_WRITE_DR14 , 62, "Write DR14."),
184 EXIT_REASON(SVM_EXIT_WRITE_DR15 , 63, "Write DR15."),
185 EXIT_REASON(SVM_EXIT_EXCEPTION_0 , 64, "Exception Vector 0 (0x0)."),
186 EXIT_REASON(SVM_EXIT_EXCEPTION_1 , 65, "Exception Vector 1 (0x1)."),
187 EXIT_REASON(SVM_EXIT_EXCEPTION_2 , 66, "Exception Vector 2 (0x2)."),
188 EXIT_REASON(SVM_EXIT_EXCEPTION_3 , 67, "Exception Vector 3 (0x3)."),
189 EXIT_REASON(SVM_EXIT_EXCEPTION_4 , 68, "Exception Vector 4 (0x4)."),
190 EXIT_REASON(SVM_EXIT_EXCEPTION_5 , 69, "Exception Vector 5 (0x5)."),
191 EXIT_REASON(SVM_EXIT_EXCEPTION_6 , 70, "Exception Vector 6 (0x6)."),
192 EXIT_REASON(SVM_EXIT_EXCEPTION_7 , 71, "Exception Vector 7 (0x7)."),
193 EXIT_REASON(SVM_EXIT_EXCEPTION_8 , 72, "Exception Vector 8 (0x8)."),
194 EXIT_REASON(SVM_EXIT_EXCEPTION_9 , 73, "Exception Vector 9 (0x9)."),
195 EXIT_REASON(SVM_EXIT_EXCEPTION_A , 74, "Exception Vector 10 (0xA)."),
196 EXIT_REASON(SVM_EXIT_EXCEPTION_B , 75, "Exception Vector 11 (0xB)."),
197 EXIT_REASON(SVM_EXIT_EXCEPTION_C , 76, "Exception Vector 12 (0xC)."),
198 EXIT_REASON(SVM_EXIT_EXCEPTION_D , 77, "Exception Vector 13 (0xD)."),
199 EXIT_REASON(SVM_EXIT_EXCEPTION_E , 78, "Exception Vector 14 (0xE)."),
200 EXIT_REASON(SVM_EXIT_EXCEPTION_F , 79, "Exception Vector 15 (0xF)."),
201 EXIT_REASON(SVM_EXIT_EXCEPTION_10 , 80, "Exception Vector 16 (0x10)."),
202 EXIT_REASON(SVM_EXIT_EXCEPTION_11 , 81, "Exception Vector 17 (0x11)."),
203 EXIT_REASON(SVM_EXIT_EXCEPTION_12 , 82, "Exception Vector 18 (0x12)."),
204 EXIT_REASON(SVM_EXIT_EXCEPTION_13 , 83, "Exception Vector 19 (0x13)."),
205 EXIT_REASON(SVM_EXIT_EXCEPTION_14 , 84, "Exception Vector 20 (0x14)."),
206 EXIT_REASON(SVM_EXIT_EXCEPTION_15 , 85, "Exception Vector 21 (0x15)."),
207 EXIT_REASON(SVM_EXIT_EXCEPTION_16 , 86, "Exception Vector 22 (0x16)."),
208 EXIT_REASON(SVM_EXIT_EXCEPTION_17 , 87, "Exception Vector 23 (0x17)."),
209 EXIT_REASON(SVM_EXIT_EXCEPTION_18 , 88, "Exception Vector 24 (0x18)."),
210 EXIT_REASON(SVM_EXIT_EXCEPTION_19 , 89, "Exception Vector 25 (0x19)."),
211 EXIT_REASON(SVM_EXIT_EXCEPTION_1A , 90, "Exception Vector 26 (0x1A)."),
212 EXIT_REASON(SVM_EXIT_EXCEPTION_1B , 91, "Exception Vector 27 (0x1B)."),
213 EXIT_REASON(SVM_EXIT_EXCEPTION_1C , 92, "Exception Vector 28 (0x1C)."),
214 EXIT_REASON(SVM_EXIT_EXCEPTION_1D , 93, "Exception Vector 29 (0x1D)."),
215 EXIT_REASON(SVM_EXIT_EXCEPTION_1E , 94, "Exception Vector 30 (0x1E)."),
216 EXIT_REASON(SVM_EXIT_EXCEPTION_1F , 95, "Exception Vector 31 (0x1F)."),
217 EXIT_REASON(SVM_EXIT_EXCEPTION_INTR , 96, "Physical maskable interrupt."),
218 EXIT_REASON(SVM_EXIT_EXCEPTION_NMI , 97, "Physical non-maskable interrupt."),
219 EXIT_REASON(SVM_EXIT_EXCEPTION_SMI , 98, "System management interrupt."),
220 EXIT_REASON(SVM_EXIT_EXCEPTION_INIT , 99, "Physical INIT signal."),
221 EXIT_REASON(SVM_EXIT_EXCEPTION_VINTR ,100, "Virtual interrupt."),
222 EXIT_REASON(SVM_EXIT_EXCEPTION_CR0_SEL_WRITE ,101, "Write to CR0 that changed any bits other than CR0.TS or CR0.MP."),
223 EXIT_REASON(SVM_EXIT_EXCEPTION_IDTR_READ ,102, "Read IDTR."),
224 EXIT_REASON(SVM_EXIT_EXCEPTION_GDTR_READ ,103, "Read GDTR."),
225 EXIT_REASON(SVM_EXIT_EXCEPTION_LDTR_READ ,104, "Read LDTR."),
226 EXIT_REASON(SVM_EXIT_EXCEPTION_TR_READ ,105, "Read TR."),
227 EXIT_REASON(SVM_EXIT_EXCEPTION_IDTR_WRITE ,106, "Write IDTR."),
228 EXIT_REASON(SVM_EXIT_EXCEPTION_GDTR_WRITE ,107, "Write GDTR."),
229 EXIT_REASON(SVM_EXIT_EXCEPTION_LDTR_WRITE ,108, "Write LDTR."),
230 EXIT_REASON(SVM_EXIT_EXCEPTION_TR_WRITE ,109, "Write TR."),
231 EXIT_REASON(SVM_EXIT_RDTSC ,110, "RDTSC instruction."),
232 EXIT_REASON(SVM_EXIT_RDPMC ,111, "RDPMC instruction."),
233 EXIT_REASON(SVM_EXIT_PUSHF ,112, "PUSHF instruction."),
234 EXIT_REASON(SVM_EXIT_POPF ,113, "POPF instruction."),
235 EXIT_REASON(SVM_EXIT_CPUID ,114, "CPUID instruction."),
236 EXIT_REASON(SVM_EXIT_RSM ,115, "RSM instruction."),
237 EXIT_REASON(SVM_EXIT_IRET ,116, "IRET instruction."),
238 EXIT_REASON(SVM_EXIT_SWINT ,117, "Software interrupt (INTn instructions)."),
239 EXIT_REASON(SVM_EXIT_INVD ,118, "INVD instruction."),
240 EXIT_REASON(SVM_EXIT_PAUSE ,119, "PAUSE instruction."),
241 EXIT_REASON(SVM_EXIT_HLT ,120, "HLT instruction."),
242 EXIT_REASON(SVM_EXIT_INVLPG ,121, "INVLPG instruction."),
243 EXIT_REASON(SVM_EXIT_INVLPGA ,122, "INVLPGA instruction."),
244 EXIT_REASON(SVM_EXIT_IOIO ,123, "IN/OUT accessing protected port (EXITINFO1 field provides more information)."),
245 EXIT_REASON(SVM_EXIT_MSR ,124, "RDMSR or WRMSR access to protected MSR."),
246 EXIT_REASON(SVM_EXIT_TASK_SWITCH ,125, "Task switch."),
247 EXIT_REASON(SVM_EXIT_FERR_FREEZE ,126, "FP legacy handling enabled, and processor is frozen in an x87/mmx instruction waiting for an interrupt."),
248 EXIT_REASON(SVM_EXIT_TASK_SHUTDOWN ,127, "Shutdown."),
249 EXIT_REASON(SVM_EXIT_TASK_VMRUN ,128, "VMRUN instruction."),
250 EXIT_REASON(SVM_EXIT_TASK_VMCALL ,129, "VMCALL instruction."),
251 EXIT_REASON(SVM_EXIT_TASK_VMLOAD ,130, "VMLOAD instruction."),
252 EXIT_REASON(SVM_EXIT_TASK_VMSAVE ,131, "VMSAVE instruction."),
253 EXIT_REASON(SVM_EXIT_TASK_STGI ,132, "STGI instruction."),
254 EXIT_REASON(SVM_EXIT_TASK_CLGI ,133, "CLGI instruction."),
255 EXIT_REASON(SVM_EXIT_TASK_SKINIT ,134, "SKINIT instruction."),
256 EXIT_REASON(SVM_EXIT_TASK_RDTSCP ,135, "RDTSCP instruction."),
257 EXIT_REASON(SVM_EXIT_TASK_ICEBP ,136, "ICEBP instruction."),
258 EXIT_REASON(SVM_EXIT_TASK_WBINVD ,137, "WBINVD instruction."),
259 EXIT_REASON(SVM_EXIT_TASK_MONITOR ,138, "MONITOR instruction."),
260 EXIT_REASON(SVM_EXIT_MWAIT_UNCOND ,139, "MWAIT instruction unconditional."),
261 EXIT_REASON(SVM_EXIT_MWAIT_ARMED ,140, "MWAIT instruction when armed."),
262 EXIT_REASON(SVM_EXIT_NPF ,1024, "Nested paging: host-level page fault occurred (EXITINFO1 contains fault errorcode; EXITINFO2 contains the guest physical address causing the fault)."),
263 EXIT_REASON_NIL()
264};
265# undef EXIT_REASON
266# undef EXIT_REASON_NIL
267#endif /* VBOX_WITH_STATISTICS */
268
269/*******************************************************************************
270* Internal Functions *
271*******************************************************************************/
272static DECLCALLBACK(int) hwaccmR3Save(PVM pVM, PSSMHANDLE pSSM);
273static DECLCALLBACK(int) hwaccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
274
275
276/**
277 * Initializes the HWACCM.
278 *
279 * @returns VBox status code.
280 * @param pVM The VM to operate on.
281 */
282VMMR3DECL(int) HWACCMR3Init(PVM pVM)
283{
284 LogFlow(("HWACCMR3Init\n"));
285
286 /*
287 * Assert alignment and sizes.
288 */
289 AssertCompileMemberAlignment(VM, hwaccm.s, 32);
290 AssertCompile(sizeof(pVM->hwaccm.s) <= sizeof(pVM->hwaccm.padding));
291
292 /* Some structure checks. */
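 /* These release checks pin the SVM_VMCB layout to the VMCB format defined by the AMD-V
    specification; the CPU reads and writes this 4 KB block directly, so any packing or
    alignment drift must be caught here at init time rather than surfacing later as subtle
    guest state corruption. */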
293 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, u8Reserved3) == 0xC0, ("u8Reserved3 offset = %x\n", RT_OFFSETOF(SVM_VMCB, u8Reserved3)));
294 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.EventInject) == 0xA8, ("ctrl.EventInject offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.EventInject)));
295 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.ExitIntInfo) == 0x88, ("ctrl.ExitIntInfo offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.ExitIntInfo)));
296 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.TLBCtrl) == 0x58, ("ctrl.TLBCtrl offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.TLBCtrl)));
297
298 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest) == 0x400, ("guest offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest)));
299 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved4) == 0x4A0, ("guest.u8Reserved4 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved4)));
300 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved6) == 0x4D8, ("guest.u8Reserved6 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved6)));
301 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved7) == 0x580, ("guest.u8Reserved7 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved7)));
302 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved9) == 0x648, ("guest.u8Reserved9 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved9)));
303 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, u8Reserved10) == 0x698, ("u8Reserved10 offset = %x\n", RT_OFFSETOF(SVM_VMCB, u8Reserved10)));
304 AssertReleaseMsg(sizeof(SVM_VMCB) == 0x1000, ("SVM_VMCB size = %x\n", sizeof(SVM_VMCB)));
305
306
307 /*
308 * Register the saved state data unit.
309 */
310 int rc = SSMR3RegisterInternal(pVM, "HWACCM", 0, HWACCM_SSM_VERSION, sizeof(HWACCM),
311 NULL, NULL, NULL,
312 NULL, hwaccmR3Save, NULL,
313 NULL, hwaccmR3Load, NULL);
314 if (RT_FAILURE(rc))
315 return rc;
316
317 /* Misc initialisation. */
318 pVM->hwaccm.s.vmx.fSupported = false;
319 pVM->hwaccm.s.svm.fSupported = false;
320 pVM->hwaccm.s.vmx.fEnabled = false;
321 pVM->hwaccm.s.svm.fEnabled = false;
322
323 pVM->hwaccm.s.fNestedPaging = false;
324
325 /* Disabled by default. */
326 pVM->fHWACCMEnabled = false;
327
328 /*
329 * Check CFGM options.
330 */
331 PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
332 PCFGMNODE pHWVirtExt = CFGMR3GetChild(pRoot, "HWVirtExt/");
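    /* All keys queried below live under the per-VM "HWVirtExt" CFGM node. Illustrative example of
       how such a key is typically set from the command line as extra data:
           VBoxManage setextradata <vmname> "VBoxInternal/HWVirtExt/EnableNestedPaging" 1
       which is then picked up by the CFGMR3Query* calls that follow. */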
333 /* Nested paging: disabled by default. */
334 rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableNestedPaging", &pVM->hwaccm.s.fAllowNestedPaging, false);
335 AssertRC(rc);
336
337 /* VT-x VPID: disabled by default. */
338 rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableVPID", &pVM->hwaccm.s.vmx.fAllowVPID, false);
339 AssertRC(rc);
340
341 /* HWACCM support must be explicitly enabled in the configuration file. */
342 rc = CFGMR3QueryBoolDef(pHWVirtExt, "Enabled", &pVM->hwaccm.s.fAllowed, false);
343 AssertRC(rc);
344
345 /* TPR patching for 32-bit (Windows) guests with an IO-APIC: disabled by default. */
346 rc = CFGMR3QueryBoolDef(pHWVirtExt, "TPRPatchingEnabled", &pVM->hwaccm.s.fTRPPatchingAllowed, false);
347 AssertRC(rc);
348
349#ifdef RT_OS_DARWIN
350 if (VMMIsHwVirtExtForced(pVM) != pVM->hwaccm.s.fAllowed)
351#else
352 if (VMMIsHwVirtExtForced(pVM) && !pVM->hwaccm.s.fAllowed)
353#endif
354 {
355 AssertLogRelMsgFailed(("VMMIsHwVirtExtForced=%RTbool fAllowed=%RTbool\n",
356 VMMIsHwVirtExtForced(pVM), pVM->hwaccm.s.fAllowed));
357 return VERR_HWACCM_CONFIG_MISMATCH;
358 }
359
360 if (VMMIsHwVirtExtForced(pVM))
361 pVM->fHWACCMEnabled = true;
362
363#if HC_ARCH_BITS == 32
364 /* 64-bit mode is configurable and it depends on both the kernel mode and VT-x.
365 * (To use the default, don't set 64bitEnabled in CFGM.) */
366 rc = CFGMR3QueryBoolDef(pHWVirtExt, "64bitEnabled", &pVM->hwaccm.s.fAllow64BitGuests, false);
367 AssertLogRelRCReturn(rc, rc);
368 if (pVM->hwaccm.s.fAllow64BitGuests)
369 {
370# ifdef RT_OS_DARWIN
371 if (!VMMIsHwVirtExtForced(pVM))
372# else
373 if (!pVM->hwaccm.s.fAllowed)
374# endif
375 return VM_SET_ERROR(pVM, VERR_INVALID_PARAMETER, "64-bit guest support was requested without also enabling HWVirtEx (VT-x/AMD-V).");
376 }
377#else
378 /* On 64-bit hosts 64-bit guest support is enabled by default, but allow this to be overridden
379 * via VBoxInternal/HWVirtExt/64bitEnabled=0. (ConsoleImpl2.cpp doesn't set this to false for 64-bit.) */
380 rc = CFGMR3QueryBoolDef(pHWVirtExt, "64bitEnabled", &pVM->hwaccm.s.fAllow64BitGuests, true);
381 AssertLogRelRCReturn(rc, rc);
382#endif
383
384
385 /** Determine the init method for AMD-V and VT-x; either one global init for each host CPU
386 * or local init each time we wish to execute guest code.
387 *
388 * Default false for Mac OS X and Windows due to the higher risk of conflicts with other hypervisors.
389 */
390 rc = CFGMR3QueryBoolDef(pHWVirtExt, "Exclusive", &pVM->hwaccm.s.fGlobalInit,
391#if defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS)
392 false
393#else
394 true
395#endif
396 );
397
398 /* Max number of resume loops. */
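    /* This caps how many times the ring-0 run loop may resume guest execution back-to-back before
       forcing a return to ring-3 (so pending ring-3 work is not starved); 0 means the ring-0 code
       chooses its own default, as the "set by R0 later" note below says. */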
399 rc = CFGMR3QueryU32Def(pHWVirtExt, "MaxResumeLoops", &pVM->hwaccm.s.cMaxResumeLoops, 0 /* set by R0 later */);
400 AssertRC(rc);
401
402 return VINF_SUCCESS;
403}
404
405/**
406 * Initializes the per-VCPU HWACCM.
407 *
408 * @returns VBox status code.
409 * @param pVM The VM to operate on.
410 */
411VMMR3DECL(int) HWACCMR3InitCPU(PVM pVM)
412{
413 LogFlow(("HWACCMR3InitCPU\n"));
414
415 for (VMCPUID i = 0; i < pVM->cCpus; i++)
416 {
417 PVMCPU pVCpu = &pVM->aCpus[i];
418
419 pVCpu->hwaccm.s.fActive = false;
420 }
421
422#ifdef VBOX_WITH_STATISTICS
423 STAM_REG(pVM, &pVM->hwaccm.s.StatTPRPatchSuccess, STAMTYPE_COUNTER, "/HWACCM/TPR/Patch/Success", STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched.");
424 STAM_REG(pVM, &pVM->hwaccm.s.StatTPRPatchFailure, STAMTYPE_COUNTER, "/HWACCM/TPR/Patch/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts.");
425 STAM_REG(pVM, &pVM->hwaccm.s.StatTPRReplaceSuccess, STAMTYPE_COUNTER, "/HWACCM/TPR/Replace/Success",STAMUNIT_OCCURENCES, "Number of times an instruction was successfully replaced.");
426 STAM_REG(pVM, &pVM->hwaccm.s.StatTPRReplaceFailure, STAMTYPE_COUNTER, "/HWACCM/TPR/Replace/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful replacement attempts.");
427
428 /*
429 * Statistics.
430 */
431 for (VMCPUID i = 0; i < pVM->cCpus; i++)
432 {
433 PVMCPU pVCpu = &pVM->aCpus[i];
434 int rc;
435
436 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of RTMpPokeCpu",
437 "/PROF/HWACCM/CPU%d/Poke", i);
438 AssertRC(rc);
439 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatSpinPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of poke wait",
440 "/PROF/HWACCM/CPU%d/PokeWait", i);
441 AssertRC(rc);
442 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatSpinPokeFailed, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of poke wait when RTMpPokeCpu fails",
443 "/PROF/HWACCM/CPU%d/PokeWaitFailed", i);
444 AssertRC(rc);
445 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatEntry, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode entry",
446 "/PROF/HWACCM/CPU%d/SwitchToGC", i);
447 AssertRC(rc);
448 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode exit part 1",
449 "/PROF/HWACCM/CPU%d/SwitchFromGC_1", i);
450 AssertRC(rc);
451 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode exit part 2",
452 "/PROF/HWACCM/CPU%d/SwitchFromGC_2", i);
453 AssertRC(rc);
454# if 1 /* temporary for tracking down darwin holdup. */
455 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2Sub1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - I/O",
456 "/PROF/HWACCM/CPU%d/SwitchFromGC_2/Sub1", i);
457 AssertRC(rc);
458 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2Sub2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - CRx RWs",
459 "/PROF/HWACCM/CPU%d/SwitchFromGC_2/Sub2", i);
460 AssertRC(rc);
461 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2Sub3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - Exceptions",
462 "/PROF/HWACCM/CPU%d/SwitchFromGC_2/Sub3", i);
463 AssertRC(rc);
464# endif
465 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatInGC, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of vmlaunch",
466 "/PROF/HWACCM/CPU%d/InGC", i);
467 AssertRC(rc);
468
469# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
470 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatWorldSwitch3264, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of the 32/64 switcher",
471 "/PROF/HWACCM/CPU%d/Switcher3264", i);
472 AssertRC(rc);
473# endif
474
475# define HWACCM_REG_COUNTER(a, b) \
476 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Occurrence counter", b, i); \
477 AssertRC(rc);
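/* Shorthand used below: registers one per-VCPU occurrence counter; 'b' is a stat path format
   string and the enclosing loop's 'i' (the VCPU id) is substituted into it, so e.g.
   "/HWACCM/CPU%d/Exit/Trap/Shw/#NM" becomes "/HWACCM/CPU0/Exit/Trap/Shw/#NM" for VCPU 0. */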
478
479 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitShadowNM, "/HWACCM/CPU%d/Exit/Trap/Shw/#NM");
480 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestNM, "/HWACCM/CPU%d/Exit/Trap/Gst/#NM");
481 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitShadowPF, "/HWACCM/CPU%d/Exit/Trap/Shw/#PF");
482 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestPF, "/HWACCM/CPU%d/Exit/Trap/Gst/#PF");
483 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestUD, "/HWACCM/CPU%d/Exit/Trap/Gst/#UD");
484 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestSS, "/HWACCM/CPU%d/Exit/Trap/Gst/#SS");
485 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestNP, "/HWACCM/CPU%d/Exit/Trap/Gst/#NP");
486 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestGP, "/HWACCM/CPU%d/Exit/Trap/Gst/#GP");
487 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestMF, "/HWACCM/CPU%d/Exit/Trap/Gst/#MF");
488 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestDE, "/HWACCM/CPU%d/Exit/Trap/Gst/#DE");
489 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestDB, "/HWACCM/CPU%d/Exit/Trap/Gst/#DB");
490 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInvpg, "/HWACCM/CPU%d/Exit/Instr/Invlpg");
491 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInvd, "/HWACCM/CPU%d/Exit/Instr/Invd");
492 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCpuid, "/HWACCM/CPU%d/Exit/Instr/Cpuid");
493 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdtsc, "/HWACCM/CPU%d/Exit/Instr/Rdtsc");
494 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdpmc, "/HWACCM/CPU%d/Exit/Instr/Rdpmc");
495 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdmsr, "/HWACCM/CPU%d/Exit/Instr/Rdmsr");
496 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitWrmsr, "/HWACCM/CPU%d/Exit/Instr/Wrmsr");
497 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMwait, "/HWACCM/CPU%d/Exit/Instr/Mwait");
498 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitDRxWrite, "/HWACCM/CPU%d/Exit/Instr/DR/Write");
499 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitDRxRead, "/HWACCM/CPU%d/Exit/Instr/DR/Read");
500 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCLTS, "/HWACCM/CPU%d/Exit/Instr/CLTS");
501 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitLMSW, "/HWACCM/CPU%d/Exit/Instr/LMSW");
502 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCli, "/HWACCM/CPU%d/Exit/Instr/Cli");
503 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitSti, "/HWACCM/CPU%d/Exit/Instr/Sti");
504 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPushf, "/HWACCM/CPU%d/Exit/Instr/Pushf");
505 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPopf, "/HWACCM/CPU%d/Exit/Instr/Popf");
506 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIret, "/HWACCM/CPU%d/Exit/Instr/Iret");
507 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInt, "/HWACCM/CPU%d/Exit/Instr/Int");
508 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitHlt, "/HWACCM/CPU%d/Exit/Instr/Hlt");
509 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOWrite, "/HWACCM/CPU%d/Exit/IO/Write");
510 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIORead, "/HWACCM/CPU%d/Exit/IO/Read");
511 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOStringWrite, "/HWACCM/CPU%d/Exit/IO/WriteString");
512 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOStringRead, "/HWACCM/CPU%d/Exit/IO/ReadString");
513 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIrqWindow, "/HWACCM/CPU%d/Exit/IrqWindow");
514 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMaxResume, "/HWACCM/CPU%d/Exit/MaxResume");
515 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPreemptPending, "/HWACCM/CPU%d/Exit/PreemptPending");
516
517 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatSwitchGuestIrq, "/HWACCM/CPU%d/Switch/IrqPending");
518 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatSwitchToR3, "/HWACCM/CPU%d/Switch/ToR3");
519
520 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatIntInject, "/HWACCM/CPU%d/Irq/Inject");
521 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatIntReinject, "/HWACCM/CPU%d/Irq/Reinject");
522 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatPendingHostIrq, "/HWACCM/CPU%d/Irq/PendingOnHost");
523
524 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPage, "/HWACCM/CPU%d/Flush/Page");
525 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPageManual, "/HWACCM/CPU%d/Flush/Page/Virt");
526 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPhysPageManual, "/HWACCM/CPU%d/Flush/Page/Phys");
527 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLB, "/HWACCM/CPU%d/Flush/TLB");
528 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBManual, "/HWACCM/CPU%d/Flush/TLB/Manual");
529 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBCRxChange, "/HWACCM/CPU%d/Flush/TLB/CRx");
530 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPageInvlpg, "/HWACCM/CPU%d/Flush/Page/Invlpg");
531 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBWorldSwitch, "/HWACCM/CPU%d/Flush/TLB/Switch");
532 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatNoFlushTLBWorldSwitch, "/HWACCM/CPU%d/Flush/TLB/Skipped");
533 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushASID, "/HWACCM/CPU%d/Flush/TLB/ASID");
534 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBInvlpga, "/HWACCM/CPU%d/Flush/TLB/PhysInvl");
535 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTlbShootdown, "/HWACCM/CPU%d/Flush/Shootdown/Page");
536 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTlbShootdownFlush, "/HWACCM/CPU%d/Flush/Shootdown/TLB");
537
538 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCOffset, "/HWACCM/CPU%d/TSC/Offset");
539 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCIntercept, "/HWACCM/CPU%d/TSC/Intercept");
540 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCInterceptOverFlow, "/HWACCM/CPU%d/TSC/InterceptOverflow");
541
542 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxArmed, "/HWACCM/CPU%d/Debug/Armed");
543 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxContextSwitch, "/HWACCM/CPU%d/Debug/ContextSwitch");
544 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxIOCheck, "/HWACCM/CPU%d/Debug/IOCheck");
545
546 for (unsigned j=0;j<RT_ELEMENTS(pVCpu->hwaccm.s.StatExitCRxWrite);j++)
547 {
548 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitCRxWrite[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Profiling of CRx writes",
549 "/HWACCM/CPU%d/Exit/Instr/CR/Write/%x", i, j);
550 AssertRC(rc);
551 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitCRxRead[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Profiling of CRx reads",
552 "/HWACCM/CPU%d/Exit/Instr/CR/Read/%x", i, j);
553 AssertRC(rc);
554 }
555
556#undef HWACCM_REG_COUNTER
557
558 pVCpu->hwaccm.s.paStatExitReason = NULL;
559
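        /* One counter per raw exit reason, allocated from the hyper heap so that the ring-0 exit
           path can update the counters through the R0 address obtained below via MMHyperR3ToR0().
           The descriptions come from the VT-x/AMD-V tables at the top of this file; nested page
           faults are accounted separately through the StatExitReasonNPF counter. */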
560 rc = MMHyperAlloc(pVM, MAX_EXITREASON_STAT*sizeof(*pVCpu->hwaccm.s.paStatExitReason), 0, MM_TAG_HWACCM, (void **)&pVCpu->hwaccm.s.paStatExitReason);
561 AssertRC(rc);
562 if (RT_SUCCESS(rc))
563 {
564 const char * const *papszDesc = ASMIsIntelCpu() ? &g_apszVTxExitReasons[0] : &g_apszAmdVExitReasons[0];
565 for (int j=0;j<MAX_EXITREASON_STAT;j++)
566 {
567 if (papszDesc[j])
568 {
569 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.paStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
570 papszDesc[j], "/HWACCM/CPU%d/Exit/Reason/%02x", i, j);
571 AssertRC(rc);
572 }
573 }
574 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitReasonNPF, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Nested page fault", "/HWACCM/CPU%d/Exit/Reason/#NPF", i);
575 AssertRC(rc);
576 }
577 pVCpu->hwaccm.s.paStatExitReasonR0 = MMHyperR3ToR0(pVM, pVCpu->hwaccm.s.paStatExitReason);
578# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
579 Assert(pVCpu->hwaccm.s.paStatExitReasonR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM));
580# else
581 Assert(pVCpu->hwaccm.s.paStatExitReasonR0 != NIL_RTR0PTR);
582# endif
583
584 rc = MMHyperAlloc(pVM, sizeof(STAMCOUNTER) * 256, 8, MM_TAG_HWACCM, (void **)&pVCpu->hwaccm.s.paStatInjectedIrqs);
585 AssertRCReturn(rc, rc);
586 pVCpu->hwaccm.s.paStatInjectedIrqsR0 = MMHyperR3ToR0(pVM, pVCpu->hwaccm.s.paStatInjectedIrqs);
587# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
588 Assert(pVCpu->hwaccm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM));
589# else
590 Assert(pVCpu->hwaccm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR);
591# endif
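        /* 256 interrupt/exception counters: vectors below 0x20 are CPU traps/exceptions and
           everything from 0x20 up is a regular interrupt vector, hence the two different stat
           path formats chosen in the loop below. */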
592 for (unsigned j = 0; j < 255; j++)
593 STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.paStatInjectedIrqs[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Forwarded interrupts.",
594 (j < 0x20) ? "/HWACCM/CPU%d/Interrupt/Trap/%02X" : "/HWACCM/CPU%d/Interrupt/IRQ/%02X", i, j);
595
596 }
597#endif /* VBOX_WITH_STATISTICS */
598
599#ifdef VBOX_WITH_CRASHDUMP_MAGIC
600 /* Magic marker for searching in crash dumps. */
601 for (VMCPUID i = 0; i < pVM->cCpus; i++)
602 {
603 PVMCPU pVCpu = &pVM->aCpus[i];
604
605 PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
606 strcpy((char *)pCache->aMagic, "VMCSCACHE Magic");
607 pCache->uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
608 }
609#endif
610 return VINF_SUCCESS;
611}
612
613/**
614 * Turns off normal raw mode features
615 *
616 * @param pVM The VM to operate on.
617 */
618static void hwaccmR3DisableRawMode(PVM pVM)
619{
620 /* Disable PATM & CSAM. */
621 PATMR3AllowPatching(pVM, false);
622 CSAMDisableScanning(pVM);
623
624 /* Turn off IDT/LDT/GDT and TSS monitoring and syncing. */
625 SELMR3DisableMonitoring(pVM);
626 TRPMR3DisableMonitoring(pVM);
627
628 /* Disable the switcher code (safety precaution). */
629 VMMR3DisableSwitcher(pVM);
630
631 /* Disable mapping of the hypervisor into the shadow page table. */
632 PGMR3MappingsDisable(pVM);
633
634 /* Disable the switcher */
635 VMMR3DisableSwitcher(pVM);
636
637 /* Reinit the paging mode to force the new shadow mode. */
638 for (VMCPUID i = 0; i < pVM->cCpus; i++)
639 {
640 PVMCPU pVCpu = &pVM->aCpus[i];
641
642 PGMR3ChangeMode(pVM, pVCpu, PGMMODE_REAL);
643 }
644}
645
646/**
647 * Initialize VT-x or AMD-V.
648 *
649 * @returns VBox status code.
650 * @param pVM The VM handle.
651 */
652VMMR3DECL(int) HWACCMR3InitFinalizeR0(PVM pVM)
653{
654 int rc;
655
656 /* Hack to allow users to work around broken BIOSes that incorrectly set EFER.SVME, which makes us believe somebody else
657 * is already using AMD-V.
658 */
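 /* Only the presence of the environment variable matters (RTEnvExist), so e.g. exporting
    VBOX_HWVIRTEX_IGNORE_SVM_IN_USE=1 (any value works) in the VM process environment activates
    this override. */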
659 if ( !pVM->hwaccm.s.vmx.fSupported
660 && !pVM->hwaccm.s.svm.fSupported
661 && pVM->hwaccm.s.lLastError == VERR_SVM_IN_USE /* implies functional AMD-V */
662 && RTEnvExist("VBOX_HWVIRTEX_IGNORE_SVM_IN_USE"))
663 {
664 LogRel(("HWACCM: VBOX_HWVIRTEX_IGNORE_SVM_IN_USE active!\n"));
665 pVM->hwaccm.s.svm.fSupported = true;
666 pVM->hwaccm.s.svm.fIgnoreInUseError = true;
667 }
668 else
669 if ( !pVM->hwaccm.s.vmx.fSupported
670 && !pVM->hwaccm.s.svm.fSupported)
671 {
672 LogRel(("HWACCM: No VT-x or AMD-V CPU extension found. Reason %Rrc\n", pVM->hwaccm.s.lLastError));
673 LogRel(("HWACCM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));
674
675 if (VMMIsHwVirtExtForced(pVM))
676 {
677 switch (pVM->hwaccm.s.lLastError)
678 {
679 case VERR_VMX_NO_VMX:
680 return VM_SET_ERROR(pVM, VERR_VMX_NO_VMX, "VT-x is not available.");
681 case VERR_VMX_IN_VMX_ROOT_MODE:
682 return VM_SET_ERROR(pVM, VERR_VMX_IN_VMX_ROOT_MODE, "VT-x is being used by another hypervisor.");
683 case VERR_SVM_IN_USE:
684 return VM_SET_ERROR(pVM, VERR_SVM_IN_USE, "AMD-V is being used by another hypervisor.");
685 case VERR_SVM_NO_SVM:
686 return VM_SET_ERROR(pVM, VERR_SVM_NO_SVM, "AMD-V is not available.");
687 case VERR_SVM_DISABLED:
688 return VM_SET_ERROR(pVM, VERR_SVM_DISABLED, "AMD-V is disabled in the BIOS.");
689 default:
690 return pVM->hwaccm.s.lLastError;
691 }
692 }
693 return VINF_SUCCESS;
694 }
695
696 if (pVM->hwaccm.s.vmx.fSupported)
697 {
698 rc = SUPR3QueryVTxSupported();
699 if (RT_FAILURE(rc))
700 {
701#ifdef RT_OS_LINUX
702 LogRel(("HWACCM: The host kernel does not support VT-x -- Linux 2.6.13 or newer required!\n"));
703#else
704 LogRel(("HWACCM: The host kernel does not support VT-x!\n"));
705#endif
706 if ( pVM->cCpus > 1
707 || VMMIsHwVirtExtForced(pVM))
708 return rc;
709
710 /* silently fall back to raw mode */
711 return VINF_SUCCESS;
712 }
713 }
714
715 if (!pVM->hwaccm.s.fAllowed)
716 return VINF_SUCCESS; /* nothing to do */
717
718 /* Enable VT-x or AMD-V on all host CPUs. */
719 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HWACC_ENABLE, 0, NULL);
720 if (RT_FAILURE(rc))
721 {
722 LogRel(("HWACCMR3InitFinalize: SUPR3CallVMMR0Ex VMMR0_DO_HWACC_ENABLE failed with %Rrc\n", rc));
723 return rc;
724 }
725 Assert(!pVM->fHWACCMEnabled || VMMIsHwVirtExtForced(pVM));
726
727 pVM->hwaccm.s.fHasIoApic = PDMHasIoApic(pVM);
728 /* No TPR patching is required when the IO-APIC is not enabled for this VM. (Main should have taken care of this already) */
729 if (!pVM->hwaccm.s.fHasIoApic)
730 {
731 Assert(!pVM->hwaccm.s.fTRPPatchingAllowed); /* paranoia */
732 pVM->hwaccm.s.fTRPPatchingAllowed = false;
733 }
734
735 if (pVM->hwaccm.s.vmx.fSupported)
736 {
737 Log(("pVM->hwaccm.s.vmx.fSupported = %d\n", pVM->hwaccm.s.vmx.fSupported));
738
739 if ( pVM->hwaccm.s.fInitialized == false
740 && pVM->hwaccm.s.vmx.msr.feature_ctrl != 0)
741 {
742 uint64_t val;
743 RTGCPHYS GCPhys = 0;
744
745 LogRel(("HWACCM: Host CR4=%08X\n", pVM->hwaccm.s.vmx.hostCR4));
746 LogRel(("HWACCM: MSR_IA32_FEATURE_CONTROL = %RX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));
747 LogRel(("HWACCM: MSR_IA32_VMX_BASIC_INFO = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_basic_info));
748 LogRel(("HWACCM: VMCS id = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
749 LogRel(("HWACCM: VMCS size = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
750 LogRel(("HWACCM: VMCS physical address limit = %s\n", MSR_IA32_VMX_BASIC_INFO_VMCS_PHYS_WIDTH(pVM->hwaccm.s.vmx.msr.vmx_basic_info) ? "< 4 GB" : "None"));
751 LogRel(("HWACCM: VMCS memory type = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_MEM_TYPE(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
752 LogRel(("HWACCM: Dual monitor treatment = %d\n", MSR_IA32_VMX_BASIC_INFO_VMCS_DUAL_MON(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
753
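            /* For each VMX control MSR dumped below: 'allowed1' lists the controls the CPU permits
               to be 1 (i.e. the optional features it supports), while 'disallowed0' lists the
               controls the CPU forces to 1, hence the "*must* be set" wording in those lines. */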
754 LogRel(("HWACCM: MSR_IA32_VMX_PINBASED_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.u));
755 val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.allowed1;
756 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT)
757 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT\n"));
758 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT)
759 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT\n"));
760 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI)
761 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI\n"));
762 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER)
763 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER\n"));
764 val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;
765 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT)
766 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT *must* be set\n"));
767 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT)
768 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT *must* be set\n"));
769 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI)
770 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI *must* be set\n"));
771 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER)
772 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER *must* be set\n"));
773
774 LogRel(("HWACCM: MSR_IA32_VMX_PROCBASED_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.u));
775 val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1;
776 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT)
777 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT\n"));
778 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET)
779 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET\n"));
780 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT)
781 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT\n"));
782 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT)
783 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT\n"));
784 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT)
785 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT\n"));
786 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT)
787 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT\n"));
788 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)
789 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT\n"));
790 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT)
791 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT\n"));
792 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT)
793 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT\n"));
794 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT)
795 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT\n"));
796 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT)
797 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT\n"));
798 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
799 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW\n"));
800 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT)
801 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT\n"));
802 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
803 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT\n"));
804 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT)
805 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT\n"));
806 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS)
807 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS\n"));
808 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
809 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG\n"));
810 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
811 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS\n"));
812 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT)
813 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT\n"));
814 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
815 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT\n"));
816 if (val & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
817 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL\n"));
818
819 val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;
820 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT)
821 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT *must* be set\n"));
822 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET)
823 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET *must* be set\n"));
824 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT)
825 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT *must* be set\n"));
826 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT)
827 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT *must* be set\n"));
828 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT)
829 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT *must* be set\n"));
830 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT)
831 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT *must* be set\n"));
832 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)
833 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT *must* be set\n"));
834 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT)
835 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT *must* be set\n"));
836 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT)
837 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT *must* be set\n"));
838 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT)
839 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT *must* be set\n"));
840 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT)
841 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT *must* be set\n"));
842 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
843 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW *must* be set\n"));
844 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT)
845 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT *must* be set\n"));
846 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
847 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT *must* be set\n"));
848 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT)
849 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT *must* be set\n"));
850 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS)
851 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS *must* be set\n"));
852 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
853 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG *must* be set\n"));
854 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
855 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS *must* be set\n"));
856 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT)
857 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT *must* be set\n"));
858 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
859 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT *must* be set\n"));
860 if (val & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
861 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL *must* be set\n"));
862
863 if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
864 {
865 LogRel(("HWACCM: MSR_IA32_VMX_PROCBASED_CTLS2 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.u));
866 val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;
867 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
868 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC\n"));
869 if (val & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
870 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_EPT\n"));
871 if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT)
872 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT\n"));
873 if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP_EXIT)
874 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP_EXIT\n"));
875 if (val & VMX_VMCS_CTRL_PROC_EXEC2_X2APIC)
876 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_X2APIC\n"));
877 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
878 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_VPID\n"));
879 if (val & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
880 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT\n"));
881 if (val & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE)
882 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE\n"));
883 if (val & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT)
884 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT\n"));
885
886 val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0;
887 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
888 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC *must* be set\n"));
889 if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT)
890 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT *must* be set\n"));
891 if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP_EXIT)
892 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP_EXIT *must* be set\n"));
893 if (val & VMX_VMCS_CTRL_PROC_EXEC2_X2APIC)
894 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_X2APIC *must* be set\n"));
895 if (val & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
896 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_EPT *must* be set\n"));
897 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
898 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_VPID *must* be set\n"));
899 if (val & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
900 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT *must* be set\n"));
901 if (val & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE)
902 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE *must* be set\n"));
903 if (val & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT)
904 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT *must* be set\n"));
905 }
906
907 LogRel(("HWACCM: MSR_IA32_VMX_ENTRY_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_entry.u));
908 val = pVM->hwaccm.s.vmx.msr.vmx_entry.n.allowed1;
909 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
910 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG\n"));
911 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE)
912 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE\n"));
913 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM)
914 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM\n"));
915 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON)
916 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON\n"));
917 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR)
918 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR\n"));
919 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR)
920 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR\n"));
921 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR)
922 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR\n"));
923 val = pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0;
924 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
925 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG *must* be set\n"));
926 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE)
927 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE *must* be set\n"));
928 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM)
929 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM *must* be set\n"));
930 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON)
931 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON *must* be set\n"));
932 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR)
933 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR *must* be set\n"));
934 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR)
935 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR *must* be set\n"));
936 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR)
937 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR *must* be set\n"));
938
939 LogRel(("HWACCM: MSR_IA32_VMX_EXIT_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_exit.u));
940 val = pVM->hwaccm.s.vmx.msr.vmx_exit.n.allowed1;
941 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG)
942 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG\n"));
943 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64)
944 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64\n"));
945 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ)
946 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ\n"));
947 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR)
948 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR\n"));
949 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR)
950 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR\n"));
951 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR)
952 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR\n"));
953 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR)
954 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR\n"));
955 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
956 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER\n"));
957 val = pVM->hwaccm.s.vmx.msr.vmx_exit.n.disallowed0;
958 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG)
959 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG *must* be set\n"));
960 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64)
961 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64 *must* be set\n"));
962 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ)
963 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ *must* be set\n"));
964 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR)
965 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR *must* be set\n"));
966 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR)
967 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR *must* be set\n"));
968 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR)
969 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR *must* be set\n"));
970 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR)
971 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR *must* be set\n"));
972 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
973 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER *must* be set\n"));
974
975 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps)
976 {
977 LogRel(("HWACCM: MSR_IA32_VMX_EPT_VPID_CAPS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_eptcaps));
978
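            /* Rough key to the capability bits below: RWX_* = supported execute-only/write-only
               EPT permission combinations, GAW_* = supported guest address widths, EMT_* =
               supported EPT memory types (UC/WC/WT/WP/WB), SP_* = supported super-page sizes, and
               the INVEPT/INVVPID bits describe which invalidation types the CPU implements. */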
979 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_X_ONLY)
980 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_RWX_X_ONLY\n"));
981 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_W_ONLY)
982 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_RWX_W_ONLY\n"));
983 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_WX_ONLY)
984 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_RWX_WX_ONLY\n"));
985 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_21_BITS)
986 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_21_BITS\n"));
987 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_30_BITS)
988 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_30_BITS\n"));
989 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_39_BITS)
990 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_39_BITS\n"));
991 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_48_BITS)
992 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_48_BITS\n"));
993 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_57_BITS)
994 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_57_BITS\n"));
995 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_UC)
996 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_UC\n"));
997 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WC)
998 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WC\n"));
999 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WT)
1000 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WT\n"));
1001 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WP)
1002 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WP\n"));
1003 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WB)
1004 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WB\n"));
1005 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_21_BITS)
1006 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_SP_21_BITS\n"));
1007 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_30_BITS)
1008 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_SP_30_BITS\n"));
1009 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_39_BITS)
1010 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_SP_39_BITS\n"));
1011 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_48_BITS)
1012 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_SP_48_BITS\n"));
1013 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT)
1014 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT\n"));
1015 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_INDIV)
1016 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_INDIV\n"));
1017 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_CONTEXT)
1018 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_CONTEXT\n"));
1019 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL)
1020 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL\n"));
1021 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID)
1022 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID\n"));
1023 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV)
1024 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV\n"));
1025 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT)
1026 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT\n"));
1027 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL)
1028 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL\n"));
1029 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT_GLOBAL)
1030 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT_GLOBAL\n"));
1031 }
1032
1033 LogRel(("HWACCM: MSR_IA32_VMX_MISC = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_misc));
1034 LogRel(("HWACCM: MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hwaccm.s.vmx.msr.vmx_misc)));
1035 LogRel(("HWACCM: MSR_IA32_VMX_MISC_ACTIVITY_STATES %x\n", MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hwaccm.s.vmx.msr.vmx_misc)));
1036 LogRel(("HWACCM: MSR_IA32_VMX_MISC_CR3_TARGET %x\n", MSR_IA32_VMX_MISC_CR3_TARGET(pVM->hwaccm.s.vmx.msr.vmx_misc)));
1037 LogRel(("HWACCM: MSR_IA32_VMX_MISC_MAX_MSR %x\n", MSR_IA32_VMX_MISC_MAX_MSR(pVM->hwaccm.s.vmx.msr.vmx_misc)));
1038 LogRel(("HWACCM: MSR_IA32_VMX_MISC_MSEG_ID %x\n", MSR_IA32_VMX_MISC_MSEG_ID(pVM->hwaccm.s.vmx.msr.vmx_misc)));
1039
1040 LogRel(("HWACCM: MSR_IA32_VMX_CR0_FIXED0 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0));
1041 LogRel(("HWACCM: MSR_IA32_VMX_CR0_FIXED1 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1));
1042 LogRel(("HWACCM: MSR_IA32_VMX_CR4_FIXED0 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0));
1043 LogRel(("HWACCM: MSR_IA32_VMX_CR4_FIXED1 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1));
1044 LogRel(("HWACCM: MSR_IA32_VMX_VMCS_ENUM = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum));
1045
1046 LogRel(("HWACCM: TPR shadow physaddr = %RHp\n", pVM->hwaccm.s.vmx.pAPICPhys));
1047
1048 /* Paranoia */
1049 AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->hwaccm.s.vmx.msr.vmx_misc) >= 512);
1050
1051 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1052 {
1053 LogRel(("HWACCM: VCPU%d: MSR bitmap physaddr = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pMSRBitmapPhys));
1054 LogRel(("HWACCM: VCPU%d: VMCS physaddr = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys));
1055 }
1056
1057#ifdef HWACCM_VTX_WITH_EPT
1058 if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
1059 pVM->hwaccm.s.fNestedPaging = pVM->hwaccm.s.fAllowNestedPaging;
1060#endif /* HWACCM_VTX_WITH_EPT */
1061#ifdef HWACCM_VTX_WITH_VPID
1062 if ( (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
1063 && !pVM->hwaccm.s.fNestedPaging) /* VPID and EPT are mutually exclusive. */
1064 pVM->hwaccm.s.vmx.fVPID = pVM->hwaccm.s.vmx.fAllowVPID;
1065#endif /* HWACCM_VTX_WITH_VPID */
1066
1067 /* Unrestricted guest execution relies on EPT. */
1068 if ( pVM->hwaccm.s.fNestedPaging
1069 && (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE))
1070 {
1071 pVM->hwaccm.s.vmx.fUnrestrictedGuest = true;
1072 }
1073
1074 /* Only try once. */
1075 pVM->hwaccm.s.fInitialized = true;
1076
1077 if (!pVM->hwaccm.s.vmx.fUnrestrictedGuest)
1078 {
1079 /* Allocate three pages for the TSS we need for real mode emulation. (2 pages for the IO bitmap) */
1080 rc = PDMR3VMMDevHeapAlloc(pVM, HWACCM_VTX_TOTAL_DEVHEAP_MEM, (RTR3PTR *)&pVM->hwaccm.s.vmx.pRealModeTSS);
1081 if (RT_SUCCESS(rc))
1082 {
1083 /* The I/O bitmap starts right after the virtual interrupt redirection bitmap. */
1084 ASMMemZero32(pVM->hwaccm.s.vmx.pRealModeTSS, sizeof(*pVM->hwaccm.s.vmx.pRealModeTSS));
1085 pVM->hwaccm.s.vmx.pRealModeTSS->offIoBitmap = sizeof(*pVM->hwaccm.s.vmx.pRealModeTSS);
1086 /* Bit set to 0 means redirection enabled. */
1087 memset(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap, 0x0, sizeof(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap));
1088 /* Allow all port IO, so the VT-x IO intercepts do their job. */
1089 memset(pVM->hwaccm.s.vmx.pRealModeTSS + 1, 0, PAGE_SIZE*2);
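 /* The I/O permission bitmap must be terminated by a byte with all bits set (see the TSS layout in the CPU manuals). */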
1090 *((unsigned char *)pVM->hwaccm.s.vmx.pRealModeTSS + HWACCM_VTX_TSS_SIZE - 2) = 0xff;
1091
1092 /* Construct a 1024-entry page directory with 4 MB pages providing the identity mapping that is used in
1093 * real mode and in protected mode without paging when EPT is active.
1094 */
1095 pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable = (PX86PD)((char *)pVM->hwaccm.s.vmx.pRealModeTSS + PAGE_SIZE * 3);
1096 for (unsigned i=0;i<X86_PG_ENTRIES;i++)
1097 {
1098 pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable->a[i].u = _4M * i;
1099 pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable->a[i].u |= X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_A | X86_PDE4M_D | X86_PDE4M_PS | X86_PDE4M_G;
1100 }
1101
1102 /* We convert it here every time as PCI regions could be reconfigured. */
1103 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hwaccm.s.vmx.pRealModeTSS, &GCPhys);
1104 AssertRC(rc);
1105 LogRel(("HWACCM: Real Mode TSS guest physaddr = %RGp\n", GCPhys));
1106
1107 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
1108 AssertRC(rc);
1109 LogRel(("HWACCM: Non-Paging Mode EPT CR3 = %RGp\n", GCPhys));
1110 }
1111 else
1112 {
1113 LogRel(("HWACCM: No real mode VT-x support (PDMR3VMMDevHeapAlloc returned %Rrc)\n", rc));
1114 pVM->hwaccm.s.vmx.pRealModeTSS = NULL;
1115 pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable = NULL;
1116 }
1117 }
1118
1119 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
1120 AssertRC(rc);
1121 if (rc == VINF_SUCCESS)
1122 {
1123 pVM->fHWACCMEnabled = true;
1124 pVM->hwaccm.s.vmx.fEnabled = true;
1125 hwaccmR3DisableRawMode(pVM);
1126
1127 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
1128#ifdef VBOX_ENABLE_64_BITS_GUESTS
1129 if (pVM->hwaccm.s.fAllow64BitGuests)
1130 {
1131 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
1132 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
1133 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL); /* 64 bits only on Intel CPUs */
1134 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
1135 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
1136 }
1137 else
1138 /* Turn on NXE if PAE has been enabled *and* the host has turned on NXE (we reuse the host EFER in the switcher) */
1139 /* @todo This needs to be fixed properly!! */
1140 if ( CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE)
1141 && (pVM->hwaccm.s.vmx.hostEFER & MSR_K6_EFER_NXE))
1142 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
1143
1144 LogRel((pVM->hwaccm.s.fAllow64BitGuests
1145 ? "HWACCM: 32-bit and 64-bit guests supported.\n"
1146 : "HWACCM: 32-bit guests supported.\n"));
1147#else
1148 LogRel(("HWACCM: 32-bit guests supported.\n"));
1149#endif
1150 LogRel(("HWACCM: VMX enabled!\n"));
1151 if (pVM->hwaccm.s.fNestedPaging)
1152 {
1153 LogRel(("HWACCM: Enabled nested paging\n"));
1154 LogRel(("HWACCM: EPT root page = %RHp\n", PGMGetHyperCR3(VMMGetCpu(pVM))));
1155 if (pVM->hwaccm.s.vmx.fUnrestrictedGuest)
1156 LogRel(("HWACCM: Unrestricted guest execution enabled!\n"));
1157 }
1158 else
1159 Assert(!pVM->hwaccm.s.vmx.fUnrestrictedGuest);
1160
1161 if (pVM->hwaccm.s.vmx.fVPID)
1162 LogRel(("HWACCM: Enabled VPID\n"));
1163
1164 if ( pVM->hwaccm.s.fNestedPaging
1165 || pVM->hwaccm.s.vmx.fVPID)
1166 {
1167 LogRel(("HWACCM: enmFlushPage %d\n", pVM->hwaccm.s.vmx.enmFlushPage));
1168 LogRel(("HWACCM: enmFlushContext %d\n", pVM->hwaccm.s.vmx.enmFlushContext));
1169 }
1170
1171 /* TPR patching status logging. */
1172 if (pVM->hwaccm.s.fTRPPatchingAllowed)
1173 {
1174 if ( (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1175 && (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
1176 {
1177 pVM->hwaccm.s.fTRPPatchingAllowed = false; /* not necessary as we have a hardware solution. */
1178 LogRel(("HWACCM: TPR Patching not required (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC).\n"));
1179 }
1180 else
1181 {
1182 uint32_t u32Eax, u32Dummy;
1183
1184 /* TPR patching needs access to the MSR_K8_LSTAR msr. */
1185 ASMCpuId(0x80000000, &u32Eax, &u32Dummy, &u32Dummy, &u32Dummy);
1186 if ( u32Eax < 0x80000001
1187 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
1188 {
1189 pVM->hwaccm.s.fTRPPatchingAllowed = false;
1190 LogRel(("HWACCM: TPR patching disabled (long mode not supported).\n"));
1191 }
1192 }
1193 }
1194 LogRel(("HWACCM: TPR Patching %s.\n", (pVM->hwaccm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));
1195 }
1196 else
1197 {
1198 LogRel(("HWACCM: VMX setup failed with rc=%Rrc!\n", rc));
1199 LogRel(("HWACCM: Last instruction error %x\n", pVM->aCpus[0].hwaccm.s.vmx.lasterror.ulInstrError));
1200 pVM->fHWACCMEnabled = false;
1201 }
1202 }
1203 }
1204 else
1205 if (pVM->hwaccm.s.svm.fSupported)
1206 {
1207 Log(("pVM->hwaccm.s.svm.fSupported = %d\n", pVM->hwaccm.s.svm.fSupported));
1208
1209 if (pVM->hwaccm.s.fInitialized == false)
1210 {
1211 /* Erratum 170 which requires a forced TLB flush for each world switch:
1212 * See http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/33610.pdf
1213 *
1214 * All BH-G1/2 and DH-G1/2 models include a fix:
1215 * Athlon X2: 0x6b 1/2
1216 * 0x68 1/2
1217 * Athlon 64: 0x7f 1
1218 * 0x6f 2
1219 * Sempron: 0x7f 1/2
1220 * 0x6f 2
1221 * 0x6c 2
1222 * 0x7c 2
1223 * Turion 64: 0x68 2
1224 *
1225 */
1226 uint32_t u32Dummy;
1227 uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
1228 ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
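 /* Decode family/model/stepping from the CPUID leaf 1 signature; the extended family/model fields only apply when the base family is 0xf. */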
1229 u32BaseFamily= (u32Version >> 8) & 0xf;
1230 u32Family = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
1231 u32Model = ((u32Version >> 4) & 0xf);
1232 u32Model = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
1233 u32Stepping = u32Version & 0xf;
1234 if ( u32Family == 0xf
1235 && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
1236 && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
1237 {
1238 LogRel(("HWACCM: AMD CPU with erratum 170 family %x model %x stepping %x\n", u32Family, u32Model, u32Stepping));
1239 }
1240
1241 LogRel(("HWACCM: cpuid 0x80000001.u32AMDFeatureECX = %RX32\n", pVM->hwaccm.s.cpuid.u32AMDFeatureECX));
1242 LogRel(("HWACCM: cpuid 0x80000001.u32AMDFeatureEDX = %RX32\n", pVM->hwaccm.s.cpuid.u32AMDFeatureEDX));
1243 LogRel(("HWACCM: AMD HWCR MSR = %RX64\n", pVM->hwaccm.s.svm.msrHWCR));
1244 LogRel(("HWACCM: AMD-V revision = %X\n", pVM->hwaccm.s.svm.u32Rev));
1245 LogRel(("HWACCM: AMD-V max ASID = %d\n", pVM->hwaccm.s.uMaxASID));
1246 LogRel(("HWACCM: AMD-V features = %X\n", pVM->hwaccm.s.svm.u32Features));
1247
1248 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
1249 LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING\n"));
1250 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_LBR_VIRT)
1251 LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_LBR_VIRT\n"));
1252 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_SVM_LOCK)
1253 LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_SVM_LOCK\n"));
1254 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
1255 LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE\n"));
1256 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_SSE_3_5_DISABLE)
1257 LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_SSE_3_5_DISABLE\n"));
1258 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER)
1259 LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER\n"));
1260
1261 /* Only try once. */
1262 pVM->hwaccm.s.fInitialized = true;
1263
1264 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
1265 pVM->hwaccm.s.fNestedPaging = pVM->hwaccm.s.fAllowNestedPaging;
1266
1267 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
1268 AssertRC(rc);
1269 if (rc == VINF_SUCCESS)
1270 {
1271 pVM->fHWACCMEnabled = true;
1272 pVM->hwaccm.s.svm.fEnabled = true;
1273
1274 if (pVM->hwaccm.s.fNestedPaging)
1275 LogRel(("HWACCM: Enabled nested paging\n"));
1276
1277 hwaccmR3DisableRawMode(pVM);
1278 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
1279 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);
1280 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP);
1281#ifdef VBOX_ENABLE_64_BITS_GUESTS
1282 if (pVM->hwaccm.s.fAllow64BitGuests)
1283 {
1284 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
1285 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
1286 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
1287 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
1288 }
1289 else
1290 /* Turn on NXE if PAE has been enabled. */
1291 if (CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
1292 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
1293#endif
1294
1295 LogRel((pVM->hwaccm.s.fAllow64BitGuests
1296 ? "HWACCM: 32-bit and 64-bit guests supported.\n"
1297 : "HWACCM: 32-bit guests supported.\n"));
1298
1299 LogRel(("HWACCM: TPR Patching %s.\n", (pVM->hwaccm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));
1300 }
1301 else
1302 {
1303 pVM->fHWACCMEnabled = false;
1304 }
1305 }
1306 }
1307 if (pVM->fHWACCMEnabled)
1308 LogRel(("HWACCM: VT-x/AMD-V init method: %s\n", (pVM->hwaccm.s.fGlobalInit) ? "GLOBAL" : "LOCAL"));
1309 return VINF_SUCCESS;
1310}
1311
1312/**
1313 * Applies relocations to data and code managed by this
1314 * component. This function will be called at init and
1315 * whenever the VMM needs to relocate itself inside the GC.
1316 *
1317 * @param pVM The VM.
1318 */
1319VMMR3DECL(void) HWACCMR3Relocate(PVM pVM)
1320{
1321 Log(("HWACCMR3Relocate to %RGv\n", MMHyperGetArea(pVM, 0)));
1322
1323 /* Fetch the current paging mode during the relocate callback during state loading. */
1324 if (VMR3GetState(pVM) == VMSTATE_LOADING)
1325 {
1326 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1327 {
1328 PVMCPU pVCpu = &pVM->aCpus[i];
1329
1330 pVCpu->hwaccm.s.enmShadowMode = PGMGetShadowMode(pVCpu);
1331 Assert(pVCpu->hwaccm.s.vmx.enmCurrGuestMode == PGMGetGuestMode(pVCpu));
1332 pVCpu->hwaccm.s.vmx.enmCurrGuestMode = PGMGetGuestMode(pVCpu);
1333 }
1334 }
1335#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1336 if (pVM->fHWACCMEnabled)
1337 {
1338 int rc;
1339
1340 switch(PGMGetHostMode(pVM))
1341 {
1342 case PGMMODE_32_BIT:
1343 pVM->hwaccm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_32_TO_AMD64);
1344 break;
1345
1346 case PGMMODE_PAE:
1347 case PGMMODE_PAE_NX:
1348 pVM->hwaccm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_PAE_TO_AMD64);
1349 break;
1350
1351 default:
1352 AssertFailed();
1353 break;
1354 }
1355 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "VMXGCStartVM64", &pVM->hwaccm.s.pfnVMXGCStartVM64);
1356 AssertReleaseMsgRC(rc, ("VMXGCStartVM64 -> rc=%Rrc\n", rc));
1357
1358 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "SVMGCVMRun64", &pVM->hwaccm.s.pfnSVMGCVMRun64);
1359 AssertReleaseMsgRC(rc, ("SVMGCVMRun64 -> rc=%Rrc\n", rc));
1360
1361 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HWACCMSaveGuestFPU64", &pVM->hwaccm.s.pfnSaveGuestFPU64);
1362 AssertReleaseMsgRC(rc, ("HWACCMSaveGuestFPU64 -> rc=%Rrc\n", rc));
1363
1364 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HWACCMSaveGuestDebug64", &pVM->hwaccm.s.pfnSaveGuestDebug64);
1365 AssertReleaseMsgRC(rc, ("HWACCMSaveGuestDebug64 -> rc=%Rrc\n", rc));
1366
1367# ifdef DEBUG
1368 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HWACCMTestSwitcher64", &pVM->hwaccm.s.pfnTest64);
1369 AssertReleaseMsgRC(rc, ("HWACCMTestSwitcher64 -> rc=%Rrc\n", rc));
1370# endif
1371 }
1372#endif
1373 return;
1374}
1375
1376/**
1377 * Checks whether hardware accelerated raw mode is allowed.
1378 *
1379 * @returns boolean
1380 * @param pVM The VM to operate on.
1381 */
1382VMMR3DECL(bool) HWACCMR3IsAllowed(PVM pVM)
1383{
1384 return pVM->hwaccm.s.fAllowed;
1385}
1386
1387/**
1388 * Notification callback which is called whenever there is a chance that a CR3
1389 * value might have changed.
1390 *
1391 * This is called by PGM.
1392 *
1393 * @param pVM The VM to operate on.
1394 * @param pVCpu The VMCPU to operate on.
1395 * @param enmShadowMode New shadow paging mode.
1396 * @param enmGuestMode New guest paging mode.
1397 */
1398VMMR3DECL(void) HWACCMR3PagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode)
1399{
1400 /* Ignore page mode changes during state loading. */
1401 if (VMR3GetState(pVCpu->pVMR3) == VMSTATE_LOADING)
1402 return;
1403
1404 pVCpu->hwaccm.s.enmShadowMode = enmShadowMode;
1405
1406 if ( pVM->hwaccm.s.vmx.fEnabled
1407 && pVM->fHWACCMEnabled)
1408 {
1409 if ( pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
1410 && enmGuestMode >= PGMMODE_PROTECTED)
1411 {
1412 PCPUMCTX pCtx;
1413
1414 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1415
1416 /* After a real mode switch to protected mode we must force
1417 * CPL to 0. Our real mode emulation had to set it to 3.
1418 */
1419 pCtx->ssHid.Attr.n.u2Dpl = 0;
1420 }
1421 }
1422
1423 if (pVCpu->hwaccm.s.vmx.enmCurrGuestMode != enmGuestMode)
1424 {
1425 /* Keep track of paging mode changes. */
1426 pVCpu->hwaccm.s.vmx.enmPrevGuestMode = pVCpu->hwaccm.s.vmx.enmCurrGuestMode;
1427 pVCpu->hwaccm.s.vmx.enmCurrGuestMode = enmGuestMode;
1428
1429 /* Did we miss a change, because all code was executed in the recompiler? */
1430 if (pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == enmGuestMode)
1431 {
1432 Log(("HWACCMR3PagingModeChanged missed %s->%s transition (prev %s)\n", PGMGetModeName(pVCpu->hwaccm.s.vmx.enmPrevGuestMode), PGMGetModeName(pVCpu->hwaccm.s.vmx.enmCurrGuestMode), PGMGetModeName(pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode)));
1433 pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode = pVCpu->hwaccm.s.vmx.enmPrevGuestMode;
1434 }
1435 }
1436
1437 /* Reset the contents of the read cache. */
1438 PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
1439 for (unsigned j=0;j<pCache->Read.cValidEntries;j++)
1440 pCache->Read.aFieldVal[j] = 0;
1441}
1442
1443/**
1444 * Terminates the HWACCM.
1445 *
1446 * Termination means cleaning up and freeing all resources;
1447 * the VM itself is at this point powered off or suspended.
1448 *
1449 * @returns VBox status code.
1450 * @param pVM The VM to operate on.
1451 */
1452VMMR3DECL(int) HWACCMR3Term(PVM pVM)
1453{
1454 if (pVM->hwaccm.s.vmx.pRealModeTSS)
1455 {
1456 PDMR3VMMDevHeapFree(pVM, pVM->hwaccm.s.vmx.pRealModeTSS);
1457 pVM->hwaccm.s.vmx.pRealModeTSS = 0;
1458 }
1459 HWACCMR3TermCPU(pVM);
1460 return 0;
1461}
1462
1463/**
1464 * Terminates the per-VCPU HWACCM.
1465 *
1466 * Termination means cleaning up and freeing all resources;
1467 * the VM itself is at this point powered off or suspended.
1468 *
1469 * @returns VBox status code.
1470 * @param pVM The VM to operate on.
1471 */
1472VMMR3DECL(int) HWACCMR3TermCPU(PVM pVM)
1473{
1474 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1475 {
1476 PVMCPU pVCpu = &pVM->aCpus[i];
1477
1478#ifdef VBOX_WITH_STATISTICS
1479 if (pVCpu->hwaccm.s.paStatExitReason)
1480 {
1481 MMHyperFree(pVM, pVCpu->hwaccm.s.paStatExitReason);
1482 pVCpu->hwaccm.s.paStatExitReason = NULL;
1483 pVCpu->hwaccm.s.paStatExitReasonR0 = NIL_RTR0PTR;
1484 }
1485 if (pVCpu->hwaccm.s.paStatInjectedIrqs)
1486 {
1487 MMHyperFree(pVM, pVCpu->hwaccm.s.paStatInjectedIrqs);
1488 pVCpu->hwaccm.s.paStatInjectedIrqs = NULL;
1489 pVCpu->hwaccm.s.paStatInjectedIrqsR0 = NIL_RTR0PTR;
1490 }
1491#endif
1492
1493#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1494 memset(pVCpu->hwaccm.s.vmx.VMCSCache.aMagic, 0, sizeof(pVCpu->hwaccm.s.vmx.VMCSCache.aMagic));
1495 pVCpu->hwaccm.s.vmx.VMCSCache.uMagic = 0;
1496 pVCpu->hwaccm.s.vmx.VMCSCache.uPos = 0xffffffff;
1497#endif
1498 }
1499 return 0;
1500}
1501
1502/**
1503 * Resets a virtual CPU.
1504 *
1505 * Used by HWACCMR3Reset and CPU hot plugging.
1506 *
1507 * @param pVCpu The CPU to reset.
1508 */
1509VMMR3DECL(void) HWACCMR3ResetCpu(PVMCPU pVCpu)
1510{
1511 /* On first entry we'll sync everything. */
1512 pVCpu->hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
1513
1514 pVCpu->hwaccm.s.vmx.cr0_mask = 0;
1515 pVCpu->hwaccm.s.vmx.cr4_mask = 0;
1516
1517 pVCpu->hwaccm.s.fActive = false;
1518 pVCpu->hwaccm.s.Event.fPending = false;
1519
1520 /* Reset state information for real-mode emulation in VT-x. */
1521 pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL;
1522 pVCpu->hwaccm.s.vmx.enmPrevGuestMode = PGMMODE_REAL;
1523 pVCpu->hwaccm.s.vmx.enmCurrGuestMode = PGMMODE_REAL;
1524
1525 /* Reset the contents of the read cache. */
1526 PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
1527 for (unsigned j=0;j<pCache->Read.cValidEntries;j++)
1528 pCache->Read.aFieldVal[j] = 0;
1529
1530#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1531 /* Magic marker for searching in crash dumps. */
1532 strcpy((char *)pCache->aMagic, "VMCSCACHE Magic");
1533 pCache->uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
1534#endif
1535}
1536
1537/**
1538 * The VM is being reset.
1539 *
1540 * For the HWACCM component this means that any GDT/LDT/TSS monitors
1541 * need to be removed.
1542 *
1543 * @param pVM VM handle.
1544 */
1545VMMR3DECL(void) HWACCMR3Reset(PVM pVM)
1546{
1547 LogFlow(("HWACCMR3Reset:\n"));
1548
1549 if (pVM->fHWACCMEnabled)
1550 hwaccmR3DisableRawMode(pVM);
1551
1552 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1553 {
1554 PVMCPU pVCpu = &pVM->aCpus[i];
1555
1556 HWACCMR3ResetCpu(pVCpu);
1557 }
1558
1559 /* Clear all patch information. */
1560 pVM->hwaccm.s.pGuestPatchMem = 0;
1561 pVM->hwaccm.s.pFreeGuestPatchMem = 0;
1562 pVM->hwaccm.s.cbGuestPatchMem = 0;
1563 pVM->hwaccm.s.cPatches = 0;
1564 pVM->hwaccm.s.PatchTree = 0;
1565 pVM->hwaccm.s.fTPRPatchingActive = false;
1566 ASMMemZero32(pVM->hwaccm.s.aPatches, sizeof(pVM->hwaccm.s.aPatches));
1567}
1568
1569/**
1570 * Callback that removes all active TPR patches and restores the original instructions.
1571 *
1572 * @returns VBox strict status code.
1573 * @param pVM The VM handle.
1574 * @param pVCpu The VMCPU for the EMT we're being called on.
1575 * @param pvUser The VCPU id (cast to a pointer) of the VCPU that issued the original patch request.
1576 *
1577 */
1578DECLCALLBACK(VBOXSTRICTRC) hwaccmR3RemovePatches(PVM pVM, PVMCPU pVCpu, void *pvUser)
1579{
1580 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
1581
1582 /* Only execute the handler on the VCPU the original patch request was issued on. */
1583 if (pVCpu->idCpu != idCpu)
1584 return VINF_SUCCESS;
1585
1586 Log(("hwaccmR3RemovePatches\n"));
1587 for (unsigned i = 0; i < pVM->hwaccm.s.cPatches; i++)
1588 {
1589 uint8_t szInstr[15];
1590 PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i];
1591 RTGCPTR pInstrGC = (RTGCPTR)pPatch->Core.Key;
1592 int rc;
1593
1594#ifdef LOG_ENABLED
1595 char szOutput[256];
1596
1597 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, 0, szOutput, sizeof(szOutput), 0);
1598 if (VBOX_SUCCESS(rc))
1599 Log(("Patched instr: %s\n", szOutput));
1600#endif
1601
1602 /* Check if the instruction is still the same. */
1603 rc = PGMPhysSimpleReadGCPtr(pVCpu, szInstr, pInstrGC, pPatch->cbNewOp);
1604 if (rc != VINF_SUCCESS)
1605 {
1606 Log(("Patched code removed? (rc=%Rrc)\n", rc));
1607 continue; /* swapped out or otherwise removed; skip it. */
1608 }
1609
1610 if (memcmp(szInstr, pPatch->aNewOpcode, pPatch->cbNewOp))
1611 {
1612 Log(("Patched instruction was changed! (rc=%Rrc)\n", rc));
1613 continue; /* skip it. */
1614 }
1615
1616 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pInstrGC, pPatch->aOpcode, pPatch->cbOp);
1617 AssertRC(rc);
1618
1619#ifdef LOG_ENABLED
1620 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, 0, szOutput, sizeof(szOutput), 0);
1621 if (VBOX_SUCCESS(rc))
1622 Log(("Original instr: %s\n", szOutput));
1623#endif
1624 }
1625 pVM->hwaccm.s.cPatches = 0;
1626 pVM->hwaccm.s.PatchTree = 0;
1627 pVM->hwaccm.s.pFreeGuestPatchMem = pVM->hwaccm.s.pGuestPatchMem;
1628 pVM->hwaccm.s.fTPRPatchingActive = false;
1629 return VINF_SUCCESS;
1630}
1631
1632/**
1633 * Enable patching in a VT-x/AMD-V guest
1634 *
1635 * @returns VBox status code.
1636 * @param pVM The VM to operate on.
1637 * @param idCpu VCPU to execute hwaccmR3RemovePatches on
1638 * @param pPatchMem Patch memory range
1639 * @param cbPatchMem Size of the memory range
1640 */
1641int hwaccmR3EnablePatching(PVM pVM, VMCPUID idCpu, RTRCPTR pPatchMem, unsigned cbPatchMem)
1642{
1643 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hwaccmR3RemovePatches, (void *)idCpu);
1644 AssertRC(rc);
1645
1646 pVM->hwaccm.s.pGuestPatchMem = pPatchMem;
1647 pVM->hwaccm.s.pFreeGuestPatchMem = pPatchMem;
1648 pVM->hwaccm.s.cbGuestPatchMem = cbPatchMem;
1649 return VINF_SUCCESS;
1650}
1651
1652/**
1653 * Enable patching in a VT-x/AMD-V guest
1654 *
1655 * @returns VBox status code.
1656 * @param pVM The VM to operate on.
1657 * @param pPatchMem Patch memory range
1658 * @param cbPatchMem Size of the memory range
1659 */
1660VMMR3DECL(int) HWACMMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1661{
1662 Log(("HWACMMR3EnablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
1663 if (pVM->cCpus > 1)
1664 {
1665 /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
1666 int rc = VMR3ReqCallNoWaitU(pVM->pUVM, VMCPUID_ANY_QUEUE,
1667 (PFNRT)hwaccmR3EnablePatching, 4, pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
1668 AssertRC(rc);
1669 return rc;
1670 }
1671 return hwaccmR3EnablePatching(pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
1672}
1673
1674/**
1675 * Disable patching in a VT-x/AMD-V guest
1676 *
1677 * @returns VBox status code.
1678 * @param pVM The VM to operate on.
1679 * @param pPatchMem Patch memory range
1680 * @param cbPatchMem Size of the memory range
1681 */
1682VMMR3DECL(int) HWACMMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1683{
1684 Log(("HWACMMR3DisablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
1685
1686 Assert(pVM->hwaccm.s.pGuestPatchMem == pPatchMem);
1687 Assert(pVM->hwaccm.s.cbGuestPatchMem == cbPatchMem);
1688
1689 /* @todo Potential deadlock when other VCPUs are waiting on the IOM lock (we own it)!! */
1690 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hwaccmR3RemovePatches, (void *)VMMGetCpuId(pVM));
1691 AssertRC(rc);
1692
1693 pVM->hwaccm.s.pGuestPatchMem = 0;
1694 pVM->hwaccm.s.pFreeGuestPatchMem = 0;
1695 pVM->hwaccm.s.cbGuestPatchMem = 0;
1696 pVM->hwaccm.s.fTPRPatchingActive = false;
1697 return VINF_SUCCESS;
1698}
1699
1700
1701/**
1702 * Callback to patch a TPR instruction (vmmcall or mov cr8)
1703 *
1704 * @returns VBox strict status code.
1705 * @param pVM The VM handle.
1706 * @param pVCpu The VMCPU for the EMT we're being called on.
1707 * @param pvUser The VCPU id (cast to a pointer) of the VCPU that issued the original patch request.
1708 *
1709 */
1710DECLCALLBACK(VBOXSTRICTRC) hwaccmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
1711{
1712 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
1713 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1714 PDISCPUSTATE pDis = &pVCpu->hwaccm.s.DisState;
1715 unsigned cbOp;
1716
1717 /* Only execute the handler on the VCPU the original patch request was issued on (the other CPU(s) might not yet have switched to protected mode). */
1718 if (pVCpu->idCpu != idCpu)
1719 return VINF_SUCCESS;
1720
1721 Log(("hwaccmR3ReplaceTprInstr: %RGv\n", pCtx->rip));
1722
1723 /* Two or more VCPUs were racing to patch this instruction. */
1724 PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
1725 if (pPatch)
1726 return VINF_SUCCESS;
1727
1728 Assert(pVM->hwaccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches));
1729
1730 int rc = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), pDis, &cbOp);
1731 AssertRC(rc);
1732 if ( rc == VINF_SUCCESS
1733 && pDis->pCurInstr->opcode == OP_MOV
1734 && cbOp >= 3)
1735 {
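 /* 0F 01 D9 = VMMCALL; it replaces the guest's TPR access and traps into the hypervisor. */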
1736 uint8_t aVMMCall[3] = { 0xf, 0x1, 0xd9};
1737 uint32_t idx = pVM->hwaccm.s.cPatches;
1738
1739 pPatch = &pVM->hwaccm.s.aPatches[idx];
1740
1741 rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
1742 AssertRC(rc);
1743
1744 pPatch->cbOp = cbOp;
1745
1746 if (pDis->param1.flags == USE_DISPLACEMENT32)
1747 {
1748 /* write. */
1749 if (pDis->param2.flags == USE_REG_GEN32)
1750 {
1751 pPatch->enmType = HWACCMTPRINSTR_WRITE_REG;
1752 pPatch->uSrcOperand = pDis->param2.base.reg_gen;
1753 }
1754 else
1755 {
1756 Assert(pDis->param2.flags == USE_IMMEDIATE32);
1757 pPatch->enmType = HWACCMTPRINSTR_WRITE_IMM;
1758 pPatch->uSrcOperand = pDis->param2.parval;
1759 }
1760 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, aVMMCall, sizeof(aVMMCall));
1761 AssertRC(rc);
1762
1763 memcpy(pPatch->aNewOpcode, aVMMCall, sizeof(aVMMCall));
1764 pPatch->cbNewOp = sizeof(aVMMCall);
1765 }
1766 else
1767 {
1768 RTGCPTR oldrip = pCtx->rip;
1769 uint32_t oldcbOp = cbOp;
1770 uint32_t uMmioReg = pDis->param1.base.reg_gen;
1771
1772 /* read */
1773 Assert(pDis->param1.flags == USE_REG_GEN32);
1774
1775 /* Found:
1776 * mov eax, dword [fffe0080] (5 bytes)
1777 * Check if next instruction is:
1778 * shr eax, 4
1779 */
1780 pCtx->rip += cbOp;
1781 rc = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), pDis, &cbOp);
1782 pCtx->rip = oldrip;
1783 if ( rc == VINF_SUCCESS
1784 && pDis->pCurInstr->opcode == OP_SHR
1785 && pDis->param1.flags == USE_REG_GEN32
1786 && pDis->param1.base.reg_gen == uMmioReg
1787 && pDis->param2.flags == USE_IMMEDIATE8
1788 && pDis->param2.parval == 4
1789 && oldcbOp + cbOp < sizeof(pVM->hwaccm.s.aPatches[idx].aOpcode))
1790 {
1791 uint8_t szInstr[15];
1792
1793 /* Replacing two instructions now. */
1794 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pPatch->aOpcode, pCtx->rip, oldcbOp + cbOp);
1795 AssertRC(rc);
1796
1797 pPatch->cbOp = oldcbOp + cbOp;
1798
1799 /* 0xF0, 0x0F, 0x20, 0xC0 = mov eax, cr8 */
1800 szInstr[0] = 0xF0;
1801 szInstr[1] = 0x0F;
1802 szInstr[2] = 0x20;
1803 szInstr[3] = 0xC0 | pDis->param1.base.reg_gen;
1804 for (unsigned i = 4; i < pPatch->cbOp; i++)
1805 szInstr[i] = 0x90; /* nop */
1806
1807 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, szInstr, pPatch->cbOp);
1808 AssertRC(rc);
1809
1810 memcpy(pPatch->aNewOpcode, szInstr, pPatch->cbOp);
1811 pPatch->cbNewOp = pPatch->cbOp;
1812
1813 Log(("Acceptable read/shr candidate!\n"));
1814 pPatch->enmType = HWACCMTPRINSTR_READ_SHR4;
1815 }
1816 else
1817 {
1818 pPatch->enmType = HWACCMTPRINSTR_READ;
1819 pPatch->uDstOperand = pDis->param1.base.reg_gen;
1820
1821 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, aVMMCall, sizeof(aVMMCall));
1822 AssertRC(rc);
1823
1824 memcpy(pPatch->aNewOpcode, aVMMCall, sizeof(aVMMCall));
1825 pPatch->cbNewOp = sizeof(aVMMCall);
1826 }
1827 }
1828
1829 pPatch->Core.Key = pCtx->eip;
1830 rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
1831 AssertRC(rc);
1832
1833 pVM->hwaccm.s.cPatches++;
1834 STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRReplaceSuccess);
1835 return VINF_SUCCESS;
1836 }
1837
1838 /* Save invalid patch, so we will not try again. */
1839 uint32_t idx = pVM->hwaccm.s.cPatches;
1840
1841#ifdef LOG_ENABLED
1842 char szOutput[256];
1843 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, 0, szOutput, sizeof(szOutput), 0);
1844 if (VBOX_SUCCESS(rc))
1845 Log(("Failed to patch instr: %s\n", szOutput));
1846#endif
1847
1848 pPatch = &pVM->hwaccm.s.aPatches[idx];
1849 pPatch->Core.Key = pCtx->eip;
1850 pPatch->enmType = HWACCMTPRINSTR_INVALID;
1851 rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
1852 AssertRC(rc);
1853 pVM->hwaccm.s.cPatches++;
1854 STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRReplaceFailure);
1855 return VINF_SUCCESS;
1856}
1857
1858/**
1859 * Callback to patch a TPR instruction (jump to generated code)
1860 *
1861 * @returns VBox strict status code.
1862 * @param pVM The VM handle.
1863 * @param pVCpu The VMCPU for the EMT we're being called on.
1864 * @param pvUser The VCPU id (cast to a pointer) of the VCPU that issued the original patch request.
1865 *
1866 */
1867DECLCALLBACK(VBOXSTRICTRC) hwaccmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
1868{
1869 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
1870 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1871 PDISCPUSTATE pDis = &pVCpu->hwaccm.s.DisState;
1872 unsigned cbOp;
1873 int rc;
1874#ifdef LOG_ENABLED
1875 RTGCPTR pInstr;
1876 char szOutput[256];
1877#endif
1878
1879 /* Only execute the handler on the VCPU the original patch request was issued on (the other CPU(s) might not yet have switched to protected mode). */
1880 if (pVCpu->idCpu != idCpu)
1881 return VINF_SUCCESS;
1882
1883 Assert(pVM->hwaccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches));
1884
1885 /* Two or more VCPUs were racing to patch this instruction. */
1886 PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
1887 if (pPatch)
1888 {
1889 Log(("hwaccmR3PatchTprInstr: already patched %RGv\n", pCtx->rip));
1890 return VINF_SUCCESS;
1891 }
1892
1893 Log(("hwaccmR3PatchTprInstr %RGv\n", pCtx->rip));
1894
1895 rc = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), pDis, &cbOp);
1896 AssertRC(rc);
1897 if ( rc == VINF_SUCCESS
1898 && pDis->pCurInstr->opcode == OP_MOV
1899 && cbOp >= 5)
1900 {
1901 uint32_t idx = pVM->hwaccm.s.cPatches;
1902 uint8_t aPatch[64];
1903 uint32_t off = 0;
1904
1905 pPatch = &pVM->hwaccm.s.aPatches[idx];
1906
1907#ifdef LOG_ENABLED
1908 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, 0, szOutput, sizeof(szOutput), 0);
1909 if (VBOX_SUCCESS(rc))
1910 Log(("Original instr: %s\n", szOutput));
1911#endif
1912
1913 rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
1914 AssertRC(rc);
1915
1916 pPatch->cbOp = cbOp;
1917 pPatch->enmType = HWACCMTPRINSTR_JUMP_REPLACEMENT;
1918
1919 if (pDis->param1.flags == USE_DISPLACEMENT32)
1920 {
1921 /*
1922 * TPR write:
1923 *
1924 * push ECX [51]
1925 * push EDX [52]
1926 * push EAX [50]
1927 * xor EDX,EDX [31 D2]
1928 * mov EAX,EAX [89 C0]
1929 * or
1930 * mov EAX,0000000CCh [B8 CC 00 00 00]
1931 * mov ECX,0C0000082h [B9 82 00 00 C0]
1932 * wrmsr [0F 30]
1933 * pop EAX [58]
1934 * pop EDX [5A]
1935 * pop ECX [59]
1936 * jmp return_address [E9 return_address]
1937 *
1938 */
1939 bool fUsesEax = (pDis->param2.flags == USE_REG_GEN32 && pDis->param2.base.reg_gen == USE_REG_EAX);
1940
1941 aPatch[off++] = 0x51; /* push ecx */
1942 aPatch[off++] = 0x52; /* push edx */
1943 if (!fUsesEax)
1944 aPatch[off++] = 0x50; /* push eax */
1945 aPatch[off++] = 0x31; /* xor edx, edx */
1946 aPatch[off++] = 0xD2;
1947 if (pDis->param2.flags == USE_REG_GEN32)
1948 {
1949 if (!fUsesEax)
1950 {
1951 aPatch[off++] = 0x89; /* mov eax, src_reg */
1952 aPatch[off++] = MAKE_MODRM(3, pDis->param2.base.reg_gen, USE_REG_EAX);
1953 }
1954 }
1955 else
1956 {
1957 Assert(pDis->param2.flags == USE_IMMEDIATE32);
1958 aPatch[off++] = 0xB8; /* mov eax, immediate */
1959 *(uint32_t *)&aPatch[off] = pDis->param2.parval;
1960 off += sizeof(uint32_t);
1961 }
1962 aPatch[off++] = 0xB9; /* mov ecx, 0xc0000082 */
1963 *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
1964 off += sizeof(uint32_t);
1965
1966 aPatch[off++] = 0x0F; /* wrmsr */
1967 aPatch[off++] = 0x30;
1968 if (!fUsesEax)
1969 aPatch[off++] = 0x58; /* pop eax */
1970 aPatch[off++] = 0x5A; /* pop edx */
1971 aPatch[off++] = 0x59; /* pop ecx */
1972 }
1973 else
1974 {
1975 /*
1976 * TPR read:
1977 *
1978 * push ECX [51]
1979 * push EDX [52]
1980 * push EAX [50]
1981 * mov ECX,0C0000082h [B9 82 00 00 C0]
1982 * rdmsr [0F 32]
1983 * mov EAX,EAX [89 C0]
1984 * pop EAX [58]
1985 * pop EDX [5A]
1986 * pop ECX [59]
1987 * jmp return_address [E9 return_address]
1988 *
1989 */
1990 Assert(pDis->param1.flags == USE_REG_GEN32);
1991
1992 if (pDis->param1.base.reg_gen != USE_REG_ECX)
1993 aPatch[off++] = 0x51; /* push ecx */
1994 if (pDis->param1.base.reg_gen != USE_REG_EDX)
1995 aPatch[off++] = 0x52; /* push edx */
1996 if (pDis->param1.base.reg_gen != USE_REG_EAX)
1997 aPatch[off++] = 0x50; /* push eax */
1998
1999 aPatch[off++] = 0x31; /* xor edx, edx */
2000 aPatch[off++] = 0xD2;
2001
2002 aPatch[off++] = 0xB9; /* mov ecx, 0xc0000082 */
2003 *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
2004 off += sizeof(uint32_t);
2005
2006 aPatch[off++] = 0x0F; /* rdmsr */
2007 aPatch[off++] = 0x32;
2008
2009 if (pDis->param1.base.reg_gen != USE_REG_EAX)
2010 {
2011 aPatch[off++] = 0x89; /* mov dst_reg, eax */
2012 aPatch[off++] = MAKE_MODRM(3, USE_REG_EAX, pDis->param1.base.reg_gen);
2013 }
2014
2015 if (pDis->param1.base.reg_gen != USE_REG_EAX)
2016 aPatch[off++] = 0x58; /* pop eax */
2017 if (pDis->param1.base.reg_gen != USE_REG_EDX)
2018 aPatch[off++] = 0x5A; /* pop edx */
2019 if (pDis->param1.base.reg_gen != USE_REG_ECX)
2020 aPatch[off++] = 0x59; /* pop ecx */
2021 }
2022 aPatch[off++] = 0xE9; /* jmp return_address */
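 /* The rel32 displacement is measured from the end of this 5-byte jmp in the patch buffer to the instruction following the original TPR access. */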
2023 *(RTRCUINTPTR *)&aPatch[off] = ((RTRCUINTPTR)pCtx->eip + cbOp) - ((RTRCUINTPTR)pVM->hwaccm.s.pFreeGuestPatchMem + off + 4);
2024 off += sizeof(RTRCUINTPTR);
2025
2026 if (pVM->hwaccm.s.pFreeGuestPatchMem + off <= pVM->hwaccm.s.pGuestPatchMem + pVM->hwaccm.s.cbGuestPatchMem)
2027 {
2028 /* Write new code to the patch buffer. */
2029 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pVM->hwaccm.s.pFreeGuestPatchMem, aPatch, off);
2030 AssertRC(rc);
2031
2032#ifdef LOG_ENABLED
2033 pInstr = pVM->hwaccm.s.pFreeGuestPatchMem;
2034 while (true)
2035 {
2036 uint32_t cb;
2037
2038 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pInstr, 0, szOutput, sizeof(szOutput), &cb);
2039 if (VBOX_SUCCESS(rc))
2040 Log(("Patch instr %s\n", szOutput));
2041
2042 pInstr += cb;
2043
2044 if (pInstr >= pVM->hwaccm.s.pFreeGuestPatchMem + off)
2045 break;
2046 }
2047#endif
2048
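 /* Build the 5-byte 'jmp rel32' that overwrites the original TPR instruction and branches into the patch buffer (rel32 is relative to EIP + 5). */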
2049 pPatch->aNewOpcode[0] = 0xE9;
2050 *(RTRCUINTPTR *)&pPatch->aNewOpcode[1] = ((RTRCUINTPTR)pVM->hwaccm.s.pFreeGuestPatchMem) - ((RTRCUINTPTR)pCtx->eip + 5);
2051
2052 /* Overwrite the TPR instruction with a jump. */
2053 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->eip, pPatch->aNewOpcode, 5);
2054 AssertRC(rc);
2055
2056#ifdef LOG_ENABLED
2057 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, 0, szOutput, sizeof(szOutput), 0);
2058 if (VBOX_SUCCESS(rc))
2059 Log(("Jump: %s\n", szOutput));
2060#endif
2061 pVM->hwaccm.s.pFreeGuestPatchMem += off;
2062 pPatch->cbNewOp = 5;
2063
2064 pPatch->Core.Key = pCtx->eip;
2065 rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
2066 AssertRC(rc);
2067
2068 pVM->hwaccm.s.cPatches++;
2069 pVM->hwaccm.s.fTPRPatchingActive = true;
2070 STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRPatchSuccess);
2071 return VINF_SUCCESS;
2072 }
2073 else
2074 Log(("Ran out of space in our patch buffer!\n"));
2075 }
2076
2077 /* Save invalid patch, so we will not try again. */
2078 uint32_t idx = pVM->hwaccm.s.cPatches;
2079
2080#ifdef LOG_ENABLED
2081 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, 0, szOutput, sizeof(szOutput), 0);
2082 if (VBOX_SUCCESS(rc))
2083 Log(("Failed to patch instr: %s\n", szOutput));
2084#endif
2085
2086 pPatch = &pVM->hwaccm.s.aPatches[idx];
2087 pPatch->Core.Key = pCtx->eip;
2088 pPatch->enmType = HWACCMTPRINSTR_INVALID;
2089 rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
2090 AssertRC(rc);
2091 pVM->hwaccm.s.cPatches++;
2092 STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRPatchFailure);
2093 return VINF_SUCCESS;
2094}
2095
2096/**
2097 * Attempt to patch TPR MMIO instructions.
2098 *
2099 * @returns VBox status code.
2100 * @param pVM The VM to operate on.
2101 * @param pVCpu The VM CPU to operate on.
2102 * @param pCtx CPU context
2103 */
2104VMMR3DECL(int) HWACCMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2105{
2106 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, (pVM->hwaccm.s.pGuestPatchMem) ? hwaccmR3PatchTprInstr : hwaccmR3ReplaceTprInstr, (void *)pVCpu->idCpu);
2107 AssertRC(rc);
2108 return rc;
2109}
2110
2111/**
2112 * Force execution of the current IO code in the recompiler
2113 *
2114 * @returns VBox status code.
2115 * @param pVM The VM to operate on.
2116 * @param pCtx Partial VM execution context
2117 */
2118VMMR3DECL(int) HWACCMR3EmulateIoBlock(PVM pVM, PCPUMCTX pCtx)
2119{
2120 PVMCPU pVCpu = VMMGetCpu(pVM);
2121
2122 Assert(pVM->fHWACCMEnabled);
2123 Log(("HWACCMR3EmulateIoBlock\n"));
2124
2125 /* This is primarily intended to speed up Grub, so we don't care about paged protected mode. */
2126 if (HWACCMCanEmulateIoBlockEx(pCtx))
2127 {
2128 Log(("HWACCMR3EmulateIoBlock -> enabled\n"));
2129 pVCpu->hwaccm.s.EmulateIoBlock.fEnabled = true;
2130 pVCpu->hwaccm.s.EmulateIoBlock.GCPtrFunctionEip = pCtx->rip;
2131 pVCpu->hwaccm.s.EmulateIoBlock.cr0 = pCtx->cr0;
2132 return VINF_EM_RESCHEDULE_REM;
2133 }
2134 return VINF_SUCCESS;
2135}
2136
2137/**
2138 * Checks if we can currently use hardware accelerated raw mode.
2139 *
2140 * @returns boolean
2141 * @param pVM The VM to operate on.
2142 * @param pCtx Partial VM execution context
2143 */
2144VMMR3DECL(bool) HWACCMR3CanExecuteGuest(PVM pVM, PCPUMCTX pCtx)
2145{
2146 PVMCPU pVCpu = VMMGetCpu(pVM);
2147
2148 Assert(pVM->fHWACCMEnabled);
2149
2150 /* If we're still executing the IO code, then return false. */
2151 if ( RT_UNLIKELY(pVCpu->hwaccm.s.EmulateIoBlock.fEnabled)
2152 && pCtx->rip < pVCpu->hwaccm.s.EmulateIoBlock.GCPtrFunctionEip + 0x200
2153 && pCtx->rip > pVCpu->hwaccm.s.EmulateIoBlock.GCPtrFunctionEip - 0x200
2154 && pCtx->cr0 == pVCpu->hwaccm.s.EmulateIoBlock.cr0)
2155 return false;
2156
2157 pVCpu->hwaccm.s.EmulateIoBlock.fEnabled = false;
2158
2159 /* AMD-V supports real & protected mode with or without paging. */
2160 if (pVM->hwaccm.s.svm.fEnabled)
2161 {
2162 pVCpu->hwaccm.s.fActive = true;
2163 return true;
2164 }
2165
2166 pVCpu->hwaccm.s.fActive = false;
2167
2168 /* Note! The context supplied by REM is partial. If we add more checks here, be sure to verify that REM provides this info! */
2169#ifdef HWACCM_VMX_EMULATE_REALMODE
2170 bool fVMMDeviceHeapEnabled = PDMVMMDevHeapIsEnabled(pVM);
2171
2172 Assert((pVM->hwaccm.s.vmx.fUnrestrictedGuest && !pVM->hwaccm.s.vmx.pRealModeTSS) || (!pVM->hwaccm.s.vmx.fUnrestrictedGuest && pVM->hwaccm.s.vmx.pRealModeTSS));
2173
2174 /** The VMM device heap is a requirement for emulating real mode or protected mode without paging when the unrestricted guest execution feature is missing. */
2175 if (fVMMDeviceHeapEnabled)
2176 {
2177 if (CPUMIsGuestInRealModeEx(pCtx))
2178 {
2179 /* VT-x will not allow high selector bases in v86 mode; fall back to the recompiler in that case.
2180 * The base must also be equal to (sel << 4).
2181 */
2182 if ( ( pCtx->cs != (pCtx->csHid.u64Base >> 4)
2183 && pCtx->csHid.u64Base != 0xffff0000 /* we can deal with the BIOS code as it's also mapped into the lower region. */)
2184 || pCtx->ds != (pCtx->dsHid.u64Base >> 4)
2185 || pCtx->es != (pCtx->esHid.u64Base >> 4)
2186 || pCtx->fs != (pCtx->fsHid.u64Base >> 4)
2187 || pCtx->gs != (pCtx->gsHid.u64Base >> 4)
2188 || pCtx->ss != (pCtx->ssHid.u64Base >> 4))
2189 {
2190 return false;
2191 }
2192 }
2193 else
2194 {
2195 PGMMODE enmGuestMode = PGMGetGuestMode(pVCpu);
2196 /* Verify the requirements for executing code in protected mode. VT-x can't handle the CPU state right after a switch
2197 * from real to protected mode. (all sorts of RPL & DPL assumptions)
2198 */
2199 if ( pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
2200 && enmGuestMode >= PGMMODE_PROTECTED)
2201 {
2202 if ( (pCtx->cs & X86_SEL_RPL)
2203 || (pCtx->ds & X86_SEL_RPL)
2204 || (pCtx->es & X86_SEL_RPL)
2205 || (pCtx->fs & X86_SEL_RPL)
2206 || (pCtx->gs & X86_SEL_RPL)
2207 || (pCtx->ss & X86_SEL_RPL))
2208 {
2209 return false;
2210 }
2211 }
2212 }
2213 }
2214 else
2215#endif /* HWACCM_VMX_EMULATE_REALMODE */
2216 {
2217 if ( !CPUMIsGuestInLongModeEx(pCtx)
2218 && !pVM->hwaccm.s.vmx.fUnrestrictedGuest)
2219 {
2220 /** @todo This should (probably) be set on every excursion to the REM,
2221 * however it's too risky right now. So, only apply it when we go
2222 * back to REM for real mode execution. (The XP hack below doesn't
2223 * work reliably without this.)
2224 * Update: Implemented in EM.cpp, see #ifdef EM_NOTIFY_HWACCM. */
2225 pVM->aCpus[0].hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;
2226
2227 if ( !pVM->hwaccm.s.fNestedPaging /* requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap*/
2228 || CPUMIsGuestInRealModeEx(pCtx)) /* requires a fake TSS for real mode - stored in the VMM device heap */
2229 return false;
2230
2231 /* Too early for VT-x; Solaris guests will fail with a guru meditation otherwise; same for XP. */
2232 if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr == 0)
2233 return false;
2234
2235 /* The guest is about to complete the switch to protected mode. Wait a bit longer. */
2236 /* Windows XP; switch to protected mode; all selectors are marked not present in the
2237 * hidden registers (possible recompiler bug; see load_seg_vm) */
2238 if (pCtx->csHid.Attr.n.u1Present == 0)
2239 return false;
2240 if (pCtx->ssHid.Attr.n.u1Present == 0)
2241 return false;
2242
2243 /* Windows XP: possible same as above, but new recompiler requires new heuristics?
2244 VT-x doesn't seem to like something about the guest state and this stuff avoids it. */
2245 /** @todo This check is actually wrong, it doesn't take the direction of the
2246 * stack segment into account. But, it does the job for now. */
2247 if (pCtx->rsp >= pCtx->ssHid.u32Limit)
2248 return false;
2249#if 0
2250 if ( pCtx->cs >= pCtx->gdtr.cbGdt
2251 || pCtx->ss >= pCtx->gdtr.cbGdt
2252 || pCtx->ds >= pCtx->gdtr.cbGdt
2253 || pCtx->es >= pCtx->gdtr.cbGdt
2254 || pCtx->fs >= pCtx->gdtr.cbGdt
2255 || pCtx->gs >= pCtx->gdtr.cbGdt)
2256 return false;
2257#endif
2258 }
2259 }
2260
2261 if (pVM->hwaccm.s.vmx.fEnabled)
2262 {
2263 uint32_t mask;
2264
2265 if (!pVM->hwaccm.s.vmx.fUnrestrictedGuest)
2266 {
2267 /* if bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
2268 mask = (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0;
2269 /* Note: We ignore the NE bit here on purpose; see vmmr0\hwaccmr0.cpp for details. */
2270 mask &= ~X86_CR0_NE;
2271
2272#ifdef HWACCM_VMX_EMULATE_REALMODE
2273 if (fVMMDeviceHeapEnabled)
2274 {
2275 /* Note: We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
2276 mask &= ~(X86_CR0_PG|X86_CR0_PE);
2277 }
2278 else
2279#endif
2280 {
2281 /* We support protected mode without paging using identity mapping. */
2282 mask &= ~X86_CR0_PG;
2283 }
2284 if ((pCtx->cr0 & mask) != mask)
2285 return false;
2286
2287 /* if bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
2288 mask = (uint32_t)~pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1;
2289 if ((pCtx->cr0 & mask) != 0)
2290 return false;
2291 }
2292
2293 /* if bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
2294 mask = (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0;
2295 mask &= ~X86_CR4_VMXE;
2296 if ((pCtx->cr4 & mask) != mask)
2297 return false;
2298
2299 /* if bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
2300 mask = (uint32_t)~pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1;
2301 if ((pCtx->cr4 & mask) != 0)
2302 return false;
2303
2304 pVCpu->hwaccm.s.fActive = true;
2305 return true;
2306 }
2307
2308 return false;
2309}
2310
2311/**
2312 * Checks if we need to reschedule due to VMM device heap changes
2313 *
2314 * @returns boolean
2315 * @param pVM The VM to operate on.
2316 * @param pCtx VM execution context
2317 */
2318VMMR3DECL(bool) HWACCMR3IsRescheduleRequired(PVM pVM, PCPUMCTX pCtx)
2319{
2320 /** The VMM device heap is a requirement for emulating real mode or protected mode without paging when the unrestricted guest execution feature is missing. (VT-x only) */
2321 if ( pVM->hwaccm.s.vmx.fEnabled
2322 && !CPUMIsGuestInPagedProtectedModeEx(pCtx)
2323 && !PDMVMMDevHeapIsEnabled(pVM)
2324 && (pVM->hwaccm.s.fNestedPaging || CPUMIsGuestInRealModeEx(pCtx)))
2325 return true;
2326
2327 return false;
2328}
2329
2330
2331/**
2332 * Notification from EM about a rescheduling into hardware assisted execution
2333 * mode.
2334 *
2335 * @param pVCpu Pointer to the current virtual cpu structure.
2336 */
2337VMMR3DECL(void) HWACCMR3NotifyScheduled(PVMCPU pVCpu)
2338{
2339 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;
2340}
2341
2342/**
2343 * Notification from EM about returning from instruction emulation (REM / EM).
2344 *
2345 * @param pVCpu Pointer to the current virtual cpu structure.
2346 */
2347VMMR3DECL(void) HWACCMR3NotifyEmulated(PVMCPU pVCpu)
2348{
2349 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;
2350}
2351
2352/**
2353 * Checks if we are currently using hardware accelerated raw mode.
2354 *
2355 * @returns boolean
2356 * @param pVCpu The VMCPU to operate on.
2357 */
2358VMMR3DECL(bool) HWACCMR3IsActive(PVMCPU pVCpu)
2359{
2360 return pVCpu->hwaccm.s.fActive;
2361}
2362
2363/**
2364 * Checks if we are currently using nested paging.
2365 *
2366 * @returns boolean
2367 * @param pVM The VM to operate on.
2368 */
2369VMMR3DECL(bool) HWACCMR3IsNestedPagingActive(PVM pVM)
2370{
2371 return pVM->hwaccm.s.fNestedPaging;
2372}
2373
2374/**
2375 * Checks if we are currently using VPID in VT-x mode.
2376 *
2377 * @returns boolean
2378 * @param pVM The VM to operate on.
2379 */
2380VMMR3DECL(bool) HWACCMR3IsVPIDActive(PVM pVM)
2381{
2382 return pVM->hwaccm.s.vmx.fVPID;
2383}
2384
2385
2386/**
2387 * Checks if internal events are pending. In that case we are not allowed to dispatch interrupts.
2388 *
2389 * @returns boolean
2390 * @param pVCpu The VMCPU to operate on.
2391 */
2392VMMR3DECL(bool) HWACCMR3IsEventPending(PVMCPU pVCpu)
2393{
2394 return HWACCMIsEnabled(pVCpu->pVMR3) && pVCpu->hwaccm.s.Event.fPending;
2395}
2396
2397/**
2398 * Restart an I/O instruction that was refused in ring-0
2399 *
2400 * @returns Strict VBox status code. Informational status codes other than the one documented
2401 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2402 * @retval VINF_SUCCESS Success.
2403 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2404 * status code must be passed on to EM.
2405 * @retval VERR_NOT_FOUND if no pending I/O instruction.
2406 *
2407 * @param pVM The VM to operate on.
2408 * @param pVCpu The VMCPU to operate on.
2409 * @param pCtx VCPU register context
2410 */
2411VMMR3DECL(VBOXSTRICTRC) HWACCMR3RestartPendingIOInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2412{
2413 HWACCMPENDINGIO enmType = pVCpu->hwaccm.s.PendingIO.enmType;
2414
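 /* Clear the pending I/O record up front so the instruction is only ever restarted once. */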
2415 pVCpu->hwaccm.s.PendingIO.enmType = HWACCMPENDINGIO_INVALID;
2416
2417 if ( pVCpu->hwaccm.s.PendingIO.GCPtrRip != pCtx->rip
2418 || enmType == HWACCMPENDINGIO_INVALID)
2419 return VERR_NOT_FOUND;
2420
2421 VBOXSTRICTRC rcStrict;
2422 switch (enmType)
2423 {
2424 case HWACCMPENDINGIO_PORT_READ:
2425 {
2426 uint32_t uAndVal = pVCpu->hwaccm.s.PendingIO.s.Port.uAndVal;
2427 uint32_t u32Val = 0;
2428
2429 rcStrict = IOMIOPortRead(pVM, pVCpu->hwaccm.s.PendingIO.s.Port.uPort,
2430 &u32Val,
2431 pVCpu->hwaccm.s.PendingIO.s.Port.cbSize);
2432 if (IOM_SUCCESS(rcStrict))
2433 {
2434 /* Write back to the EAX register. */
2435 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
2436 pCtx->rip = pVCpu->hwaccm.s.PendingIO.GCPtrRipNext;
2437 }
2438 break;
2439 }
2440
2441 case HWACCMPENDINGIO_PORT_WRITE:
2442 rcStrict = IOMIOPortWrite(pVM, pVCpu->hwaccm.s.PendingIO.s.Port.uPort,
2443 pCtx->eax & pVCpu->hwaccm.s.PendingIO.s.Port.uAndVal,
2444 pVCpu->hwaccm.s.PendingIO.s.Port.cbSize);
2445 if (IOM_SUCCESS(rcStrict))
2446 pCtx->rip = pVCpu->hwaccm.s.PendingIO.GCPtrRipNext;
2447 break;
2448
2449 default:
2450 AssertFailed();
2451 return VERR_INTERNAL_ERROR;
2452 }
2453
2454 return rcStrict;
2455}
2456
2457/**
2458 * Inject an NMI into a running VM (only VCPU 0!)
2459 *
2460 * @returns VBox status code.
2461 * @param pVM The VM to operate on.
2462 */
2463VMMR3DECL(int) HWACCMR3InjectNMI(PVM pVM)
2464{
2465 VMCPU_FF_SET(&pVM->aCpus[0], VMCPU_FF_INTERRUPT_NMI);
2466 return VINF_SUCCESS;
2467}
2468
2469/**
2470 * Check fatal VT-x/AMD-V error and produce some meaningful
2471 * release log message.
2472 *
2473 * @param pVM The VM to operate on.
2474 * @param iStatusCode VBox status code
2475 */
2476VMMR3DECL(void) HWACCMR3CheckError(PVM pVM, int iStatusCode)
2477{
2478 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2479 {
2480 switch(iStatusCode)
2481 {
2482 case VERR_VMX_INVALID_VMCS_FIELD:
2483 break;
2484
2485 case VERR_VMX_INVALID_VMCS_PTR:
2486 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current pointer %RGp vs %RGp\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.u64VMCSPhys, pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys));
2487 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current VMCS version %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulVMCSRevision));
2488 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Entered Cpu %d\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.idEnteredCpu));
2489 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current Cpu %d\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.idCurrentCpu));
2490 break;
2491
2492 case VERR_VMX_UNABLE_TO_START_VM:
2493 LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError));
2494 LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulExitReason));
2495#if 0 /* @todo dump the current control fields to the release log */
2496 if (pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError == VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS)
2497 {
2498
2499 }
2500#endif
2501 break;
2502
2503 case VERR_VMX_UNABLE_TO_RESUME_VM:
2504 LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError));
2505 LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulExitReason));
2506 break;
2507
2508 case VERR_VMX_INVALID_VMXON_PTR:
2509 break;
2510 }
2511 }
2512}
2513
2514/**
2515 * Execute state save operation.
2516 *
2517 * @returns VBox status code.
2518 * @param pVM VM Handle.
2519 * @param pSSM SSM operation handle.
2520 */
2521static DECLCALLBACK(int) hwaccmR3Save(PVM pVM, PSSMHANDLE pSSM)
2522{
2523 int rc;
2524
2525 Log(("hwaccmR3Save:\n"));
2526
2527 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2528 {
2529 /*
2530 * Save the basic bits - fortunately all the other things can be resynced on load.
2531 */
2532 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.Event.fPending);
2533 AssertRCReturn(rc, rc);
2534 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.Event.errCode);
2535 AssertRCReturn(rc, rc);
2536 rc = SSMR3PutU64(pSSM, pVM->aCpus[i].hwaccm.s.Event.intInfo);
2537 AssertRCReturn(rc, rc);
2538
2539 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmLastSeenGuestMode);
2540 AssertRCReturn(rc, rc);
2541 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmCurrGuestMode);
2542 AssertRCReturn(rc, rc);
2543 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmPrevGuestMode);
2544 AssertRCReturn(rc, rc);
2545 }
2546#ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
2547 rc = SSMR3PutGCPtr(pSSM, pVM->hwaccm.s.pGuestPatchMem);
2548 AssertRCReturn(rc, rc);
2549 rc = SSMR3PutGCPtr(pSSM, pVM->hwaccm.s.pFreeGuestPatchMem);
2550 AssertRCReturn(rc, rc);
2551 rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.cbGuestPatchMem);
2552 AssertRCReturn(rc, rc);
2553
2554 /* Store all the guest patch records too. */
2555 rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.cPatches);
2556 AssertRCReturn(rc, rc);
2557
2558 for (unsigned i = 0; i < pVM->hwaccm.s.cPatches; i++)
2559 {
2560 PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i];
2561
2562 rc = SSMR3PutU32(pSSM, pPatch->Core.Key);
2563 AssertRCReturn(rc, rc);
2564
2565 rc = SSMR3PutMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
2566 AssertRCReturn(rc, rc);
2567
2568 rc = SSMR3PutU32(pSSM, pPatch->cbOp);
2569 AssertRCReturn(rc, rc);
2570
2571 rc = SSMR3PutMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
2572 AssertRCReturn(rc, rc);
2573
2574 rc = SSMR3PutU32(pSSM, pPatch->cbNewOp);
2575 AssertRCReturn(rc, rc);
2576
2577 AssertCompileSize(HWACCMTPRINSTR, 4);
2578 rc = SSMR3PutU32(pSSM, (uint32_t)pPatch->enmType);
2579 AssertRCReturn(rc, rc);
2580
2581 rc = SSMR3PutU32(pSSM, pPatch->uSrcOperand);
2582 AssertRCReturn(rc, rc);
2583
2584 rc = SSMR3PutU32(pSSM, pPatch->uDstOperand);
2585 AssertRCReturn(rc, rc);
2586
2587 rc = SSMR3PutU32(pSSM, pPatch->pJumpTarget);
2588 AssertRCReturn(rc, rc);
2589
2590 rc = SSMR3PutU32(pSSM, pPatch->cFaults);
2591 AssertRCReturn(rc, rc);
2592 }
2593#endif
2594 return VINF_SUCCESS;
2595}
2596
2597/**
2598 * Execute state load operation.
2599 *
2600 * @returns VBox status code.
2601 * @param pVM VM Handle.
2602 * @param pSSM SSM operation handle.
2603 * @param uVersion Data layout version.
2604 * @param uPass The data pass.
2605 */
2606static DECLCALLBACK(int) hwaccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
2607{
2608 int rc;
2609
2610 Log(("hwaccmR3Load:\n"));
2611 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
2612
2613 /*
2614 * Validate version.
2615 */
2616 if ( uVersion != HWACCM_SSM_VERSION
2617 && uVersion != HWACCM_SSM_VERSION_NO_PATCHING
2618 && uVersion != HWACCM_SSM_VERSION_2_0_X)
2619 {
2620 AssertMsgFailed(("hwaccmR3Load: Invalid version uVersion=%d!\n", uVersion));
2621 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
2622 }
2623 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2624 {
2625 rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hwaccm.s.Event.fPending);
2626 AssertRCReturn(rc, rc);
2627 rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hwaccm.s.Event.errCode);
2628 AssertRCReturn(rc, rc);
2629 rc = SSMR3GetU64(pSSM, &pVM->aCpus[i].hwaccm.s.Event.intInfo);
2630 AssertRCReturn(rc, rc);
2631
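 /* The guest mode fields below are not present in the older 2.0.x saved state format. */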
2632 if (uVersion >= HWACCM_SSM_VERSION_NO_PATCHING)
2633 {
2634 uint32_t val;
2635
2636 rc = SSMR3GetU32(pSSM, &val);
2637 AssertRCReturn(rc, rc);
2638 pVM->aCpus[i].hwaccm.s.vmx.enmLastSeenGuestMode = (PGMMODE)val;
2639
2640 rc = SSMR3GetU32(pSSM, &val);
2641 AssertRCReturn(rc, rc);
2642 pVM->aCpus[i].hwaccm.s.vmx.enmCurrGuestMode = (PGMMODE)val;
2643
2644 rc = SSMR3GetU32(pSSM, &val);
2645 AssertRCReturn(rc, rc);
2646 pVM->aCpus[i].hwaccm.s.vmx.enmPrevGuestMode = (PGMMODE)val;
2647 }
2648 }
2649#ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
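 /* TPR patch records only exist in saved states newer than HWACCM_SSM_VERSION_NO_PATCHING. */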
2650 if (uVersion > HWACCM_SSM_VERSION_NO_PATCHING)
2651 {
2652 rc = SSMR3GetGCPtr(pSSM, &pVM->hwaccm.s.pGuestPatchMem);
2653 AssertRCReturn(rc, rc);
2654 rc = SSMR3GetGCPtr(pSSM, &pVM->hwaccm.s.pFreeGuestPatchMem);
2655 AssertRCReturn(rc, rc);
2656 rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.cbGuestPatchMem);
2657 AssertRCReturn(rc, rc);
2658
2659 /* Fetch all TPR patch records. */
2660 rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.cPatches);
2661 AssertRCReturn(rc, rc);
2662
2663 for (unsigned i = 0; i < pVM->hwaccm.s.cPatches; i++)
2664 {
2665 PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i];
2666
2667 rc = SSMR3GetU32(pSSM, &pPatch->Core.Key);
2668 AssertRCReturn(rc, rc);
2669
2670 rc = SSMR3GetMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
2671 AssertRCReturn(rc, rc);
2672
2673 rc = SSMR3GetU32(pSSM, &pPatch->cbOp);
2674 AssertRCReturn(rc, rc);
2675
2676 rc = SSMR3GetMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
2677 AssertRCReturn(rc, rc);
2678
2679 rc = SSMR3GetU32(pSSM, &pPatch->cbNewOp);
2680 AssertRCReturn(rc, rc);
2681
2682 rc = SSMR3GetU32(pSSM, (uint32_t *)&pPatch->enmType);
2683 AssertRCReturn(rc, rc);
2684
2685 if (pPatch->enmType == HWACCMTPRINSTR_JUMP_REPLACEMENT)
2686 pVM->hwaccm.s.fTPRPatchingActive = true;
2687
2688 Assert(pPatch->enmType == HWACCMTPRINSTR_JUMP_REPLACEMENT || pVM->hwaccm.s.fTPRPatchingActive == false);
2689
2690 rc = SSMR3GetU32(pSSM, &pPatch->uSrcOperand);
2691 AssertRCReturn(rc, rc);
2692
2693 rc = SSMR3GetU32(pSSM, &pPatch->uDstOperand);
2694 AssertRCReturn(rc, rc);
2695
2696 rc = SSMR3GetU32(pSSM, &pPatch->cFaults);
2697 AssertRCReturn(rc, rc);
2698
2699 rc = SSMR3GetU32(pSSM, &pPatch->pJumpTarget);
2700 AssertRCReturn(rc, rc);
2701
2702 Log(("hwaccmR3Load: patch %d\n", i));
2703 Log(("Key = %x\n", pPatch->Core.Key));
2704 Log(("cbOp = %d\n", pPatch->cbOp));
2705 Log(("cbNewOp = %d\n", pPatch->cbNewOp));
2706 Log(("type = %d\n", pPatch->enmType));
2707 Log(("srcop = %d\n", pPatch->uSrcOperand));
2708 Log(("dstop = %d\n", pPatch->uDstOperand));
2709 Log(("cFaults = %d\n", pPatch->cFaults));
2710 Log(("target = %x\n", pPatch->pJumpTarget));
2711 rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
2712 AssertRC(rc);
2713 }
2714 }
2715#endif
2716
2717 /* Recheck all VCPUs if we can go straight into hwaccm execution mode. */
2718 if (HWACCMIsEnabled(pVM))
2719 {
2720 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2721 {
2722 PVMCPU pVCpu = &pVM->aCpus[i];
2723
2724 HWACCMR3CanExecuteGuest(pVM, CPUMQueryGuestCtxPtr(pVCpu));
2725 }
2726 }
2727 return VINF_SUCCESS;
2728}
2729