VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/HM.cpp@47202

Last change on this file since 47202 was 47202, checked in by vboxsync, 12 years ago

VMM/HM: comments.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 139.0 KB
1/* $Id: HM.cpp 47202 2013-07-16 16:55:11Z vboxsync $ */
2/** @file
3 * HM - Intel/AMD VM Hardware Support Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_HM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/stam.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/pdmapi.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/ssm.h>
29#include <VBox/vmm/trpm.h>
30#include <VBox/vmm/dbgf.h>
31#include <VBox/vmm/iom.h>
32#include <VBox/vmm/patm.h>
33#include <VBox/vmm/csam.h>
34#include <VBox/vmm/selm.h>
35#ifdef VBOX_WITH_REM
36# include <VBox/vmm/rem.h>
37#endif
38#include <VBox/vmm/hm_vmx.h>
39#include <VBox/vmm/hm_svm.h>
40#include "HMInternal.h"
41#include <VBox/vmm/vm.h>
42#include <VBox/vmm/uvm.h>
43#include <VBox/err.h>
44#include <VBox/param.h>
45
46#include <iprt/assert.h>
47#include <VBox/log.h>
48#include <iprt/asm.h>
49#include <iprt/asm-amd64-x86.h>
50#include <iprt/string.h>
51#include <iprt/env.h>
52#include <iprt/thread.h>
53
54
55/*******************************************************************************
56* Global Variables *
57*******************************************************************************/
58#ifdef VBOX_WITH_STATISTICS
59# define EXIT_REASON(def, val, str) #def " - " #val " - " str
60# define EXIT_REASON_NIL() NULL
61/** Exit reason descriptions for VT-x, used to describe statistics. */
62static const char * const g_apszVTxExitReasons[MAX_EXITREASON_STAT] =
63{
64 EXIT_REASON(VMX_EXIT_XCPT_OR_NMI , 0, "Exception or non-maskable interrupt (NMI)."),
65 EXIT_REASON(VMX_EXIT_EXT_INT , 1, "External interrupt."),
66 EXIT_REASON(VMX_EXIT_TRIPLE_FAULT , 2, "Triple fault."),
67 EXIT_REASON(VMX_EXIT_INIT_SIGNAL , 3, "INIT signal."),
68 EXIT_REASON(VMX_EXIT_SIPI , 4, "Start-up IPI (SIPI)."),
69 EXIT_REASON(VMX_EXIT_IO_SMI_IRQ , 5, "I/O system-management interrupt (SMI)."),
70 EXIT_REASON(VMX_EXIT_SMI_IRQ , 6, "Other SMI."),
71 EXIT_REASON(VMX_EXIT_INT_WINDOW , 7, "Interrupt window."),
72 EXIT_REASON_NIL(),
73 EXIT_REASON(VMX_EXIT_TASK_SWITCH , 9, "Task switch."),
74 EXIT_REASON(VMX_EXIT_CPUID , 10, "Guest attempted to execute CPUID."),
75 EXIT_REASON_NIL(),
76 EXIT_REASON(VMX_EXIT_HLT , 12, "Guest attempted to execute HLT."),
77 EXIT_REASON(VMX_EXIT_INVD , 13, "Guest attempted to execute INVD."),
78 EXIT_REASON(VMX_EXIT_INVLPG , 14, "Guest attempted to execute INVLPG."),
79 EXIT_REASON(VMX_EXIT_RDPMC , 15, "Guest attempted to execute RDPMC."),
80 EXIT_REASON(VMX_EXIT_RDTSC , 16, "Guest attempted to execute RDTSC."),
81 EXIT_REASON(VMX_EXIT_RSM , 17, "Guest attempted to execute RSM in SMM."),
82 EXIT_REASON(VMX_EXIT_VMCALL , 18, "Guest attempted to execute VMCALL."),
83 EXIT_REASON(VMX_EXIT_VMCLEAR , 19, "Guest attempted to execute VMCLEAR."),
84 EXIT_REASON(VMX_EXIT_VMLAUNCH , 20, "Guest attempted to execute VMLAUNCH."),
85 EXIT_REASON(VMX_EXIT_VMPTRLD , 21, "Guest attempted to execute VMPTRLD."),
86 EXIT_REASON(VMX_EXIT_VMPTRST , 22, "Guest attempted to execute VMPTRST."),
87 EXIT_REASON(VMX_EXIT_VMREAD , 23, "Guest attempted to execute VMREAD."),
88 EXIT_REASON(VMX_EXIT_VMRESUME , 24, "Guest attempted to execute VMRESUME."),
89 EXIT_REASON(VMX_EXIT_VMWRITE , 25, "Guest attempted to execute VMWRITE."),
90 EXIT_REASON(VMX_EXIT_VMXOFF , 26, "Guest attempted to execute VMXOFF."),
91 EXIT_REASON(VMX_EXIT_VMXON , 27, "Guest attempted to execute VMXON."),
92 EXIT_REASON(VMX_EXIT_MOV_CRX , 28, "Control-register accesses."),
93 EXIT_REASON(VMX_EXIT_MOV_DRX , 29, "Debug-register accesses."),
94 EXIT_REASON(VMX_EXIT_PORT_IO , 30, "I/O instruction."),
95 EXIT_REASON(VMX_EXIT_RDMSR , 31, "Guest attempted to execute RDMSR."),
96 EXIT_REASON(VMX_EXIT_WRMSR , 32, "Guest attempted to execute WRMSR."),
97 EXIT_REASON(VMX_EXIT_ERR_INVALID_GUEST_STATE, 33, "VM-entry failure due to invalid guest state."),
98 EXIT_REASON(VMX_EXIT_ERR_MSR_LOAD , 34, "VM-entry failure due to MSR loading."),
99 EXIT_REASON_NIL(),
100 EXIT_REASON(VMX_EXIT_MWAIT , 36, "Guest executed MWAIT."),
101 EXIT_REASON(VMX_EXIT_MTF , 37, "Monitor Trap Flag."),
102 EXIT_REASON_NIL(),
103 EXIT_REASON(VMX_EXIT_MONITOR , 39, "Guest attempted to execute MONITOR."),
104 EXIT_REASON(VMX_EXIT_PAUSE , 40, "Guest attempted to execute PAUSE."),
105 EXIT_REASON(VMX_EXIT_ERR_MACHINE_CHECK , 41, "VM-entry failure due to machine-check."),
106 EXIT_REASON_NIL(),
107 EXIT_REASON(VMX_EXIT_TPR_BELOW_THRESHOLD, 43, "TPR below threshold. Guest attempted to execute MOV to CR8."),
108 EXIT_REASON(VMX_EXIT_APIC_ACCESS , 44, "APIC access. Guest attempted to access memory at a physical address on the APIC-access page."),
109 EXIT_REASON_NIL(),
110 EXIT_REASON(VMX_EXIT_XDTR_ACCESS , 46, "Access to GDTR or IDTR. Guest attempted to execute LGDT, LIDT, SGDT, or SIDT."),
111 EXIT_REASON(VMX_EXIT_TR_ACCESS , 47, "Access to LDTR or TR. Guest attempted to execute LLDT, LTR, SLDT, or STR."),
112 EXIT_REASON(VMX_EXIT_EPT_VIOLATION , 48, "EPT violation. An attempt to access memory with a guest-physical address was disallowed by the configuration of the EPT paging structures."),
113 EXIT_REASON(VMX_EXIT_EPT_MISCONFIG , 49, "EPT misconfiguration. An attempt to access memory with a guest-physical address encountered a misconfigured EPT paging-structure entry."),
114 EXIT_REASON(VMX_EXIT_INVEPT , 50, "Guest attempted to execute INVEPT."),
115 EXIT_REASON(VMX_EXIT_RDTSCP , 51, "Guest attempted to execute RDTSCP."),
116 EXIT_REASON(VMX_EXIT_PREEMPT_TIMER , 52, "VMX-preemption timer expired."),
117 EXIT_REASON(VMX_EXIT_INVVPID , 53, "Guest attempted to execute INVVPID."),
118 EXIT_REASON(VMX_EXIT_WBINVD , 54, "Guest attempted to execute WBINVD."),
119 EXIT_REASON(VMX_EXIT_XSETBV , 55, "Guest attempted to execute XSETBV."),
120 EXIT_REASON_NIL(),
121 EXIT_REASON(VMX_EXIT_RDRAND , 57, "Guest attempted to execute RDRAND."),
122 EXIT_REASON(VMX_EXIT_INVPCID , 58, "Guest attempted to execute INVPCID."),
123 EXIT_REASON(VMX_EXIT_VMFUNC , 59, "Guest attempted to execute VMFUNC.")
124};
125/** Exit reason descriptions for AMD-V, used to describe statistics. */
126static const char * const g_apszAmdVExitReasons[MAX_EXITREASON_STAT] =
127{
128 EXIT_REASON(SVM_EXIT_READ_CR0 , 0, "Read CR0."),
129 EXIT_REASON(SVM_EXIT_READ_CR1 , 1, "Read CR1."),
130 EXIT_REASON(SVM_EXIT_READ_CR2 , 2, "Read CR2."),
131 EXIT_REASON(SVM_EXIT_READ_CR3 , 3, "Read CR3."),
132 EXIT_REASON(SVM_EXIT_READ_CR4 , 4, "Read CR4."),
133 EXIT_REASON(SVM_EXIT_READ_CR5 , 5, "Read CR5."),
134 EXIT_REASON(SVM_EXIT_READ_CR6 , 6, "Read CR6."),
135 EXIT_REASON(SVM_EXIT_READ_CR7 , 7, "Read CR7."),
136 EXIT_REASON(SVM_EXIT_READ_CR8 , 8, "Read CR8."),
137 EXIT_REASON(SVM_EXIT_READ_CR9 , 9, "Read CR9."),
138 EXIT_REASON(SVM_EXIT_READ_CR10 , 10, "Read CR10."),
139 EXIT_REASON(SVM_EXIT_READ_CR11 , 11, "Read CR11."),
140 EXIT_REASON(SVM_EXIT_READ_CR12 , 12, "Read CR12."),
141 EXIT_REASON(SVM_EXIT_READ_CR13 , 13, "Read CR13."),
142 EXIT_REASON(SVM_EXIT_READ_CR14 , 14, "Read CR14."),
143 EXIT_REASON(SVM_EXIT_READ_CR15 , 15, "Read CR15."),
144 EXIT_REASON(SVM_EXIT_WRITE_CR0 , 16, "Write CR0."),
145 EXIT_REASON(SVM_EXIT_WRITE_CR1 , 17, "Write CR1."),
146 EXIT_REASON(SVM_EXIT_WRITE_CR2 , 18, "Write CR2."),
147 EXIT_REASON(SVM_EXIT_WRITE_CR3 , 19, "Write CR3."),
148 EXIT_REASON(SVM_EXIT_WRITE_CR4 , 20, "Write CR4."),
149 EXIT_REASON(SVM_EXIT_WRITE_CR5 , 21, "Write CR5."),
150 EXIT_REASON(SVM_EXIT_WRITE_CR6 , 22, "Write CR6."),
151 EXIT_REASON(SVM_EXIT_WRITE_CR7 , 23, "Write CR7."),
152 EXIT_REASON(SVM_EXIT_WRITE_CR8 , 24, "Write CR8."),
153 EXIT_REASON(SVM_EXIT_WRITE_CR9 , 25, "Write CR9."),
154 EXIT_REASON(SVM_EXIT_WRITE_CR10 , 26, "Write CR10."),
155 EXIT_REASON(SVM_EXIT_WRITE_CR11 , 27, "Write CR11."),
156 EXIT_REASON(SVM_EXIT_WRITE_CR12 , 28, "Write CR12."),
157 EXIT_REASON(SVM_EXIT_WRITE_CR13 , 29, "Write CR13."),
158 EXIT_REASON(SVM_EXIT_WRITE_CR14 , 30, "Write CR14."),
159 EXIT_REASON(SVM_EXIT_WRITE_CR15 , 31, "Write CR15."),
160 EXIT_REASON(SVM_EXIT_READ_DR0 , 32, "Read DR0."),
161 EXIT_REASON(SVM_EXIT_READ_DR1 , 33, "Read DR1."),
162 EXIT_REASON(SVM_EXIT_READ_DR2 , 34, "Read DR2."),
163 EXIT_REASON(SVM_EXIT_READ_DR3 , 35, "Read DR3."),
164 EXIT_REASON(SVM_EXIT_READ_DR4 , 36, "Read DR4."),
165 EXIT_REASON(SVM_EXIT_READ_DR5 , 37, "Read DR5."),
166 EXIT_REASON(SVM_EXIT_READ_DR6 , 38, "Read DR6."),
167 EXIT_REASON(SVM_EXIT_READ_DR7 , 39, "Read DR7."),
168 EXIT_REASON(SVM_EXIT_READ_DR8 , 40, "Read DR8."),
169 EXIT_REASON(SVM_EXIT_READ_DR9 , 41, "Read DR9."),
170 EXIT_REASON(SVM_EXIT_READ_DR10 , 42, "Read DR10."),
171 EXIT_REASON(SVM_EXIT_READ_DR11 , 43, "Read DR11."),
172 EXIT_REASON(SVM_EXIT_READ_DR12 , 44, "Read DR12."),
173 EXIT_REASON(SVM_EXIT_READ_DR13 , 45, "Read DR13."),
174 EXIT_REASON(SVM_EXIT_READ_DR14 , 46, "Read DR14."),
175 EXIT_REASON(SVM_EXIT_READ_DR15 , 47, "Read DR15."),
176 EXIT_REASON(SVM_EXIT_WRITE_DR0 , 48, "Write DR0."),
177 EXIT_REASON(SVM_EXIT_WRITE_DR1 , 49, "Write DR1."),
178 EXIT_REASON(SVM_EXIT_WRITE_DR2 , 50, "Write DR2."),
179 EXIT_REASON(SVM_EXIT_WRITE_DR3 , 51, "Write DR3."),
180 EXIT_REASON(SVM_EXIT_WRITE_DR4 , 52, "Write DR4."),
181 EXIT_REASON(SVM_EXIT_WRITE_DR5 , 53, "Write DR5."),
182 EXIT_REASON(SVM_EXIT_WRITE_DR6 , 54, "Write DR6."),
183 EXIT_REASON(SVM_EXIT_WRITE_DR7 , 55, "Write DR7."),
184 EXIT_REASON(SVM_EXIT_WRITE_DR8 , 56, "Write DR8."),
185 EXIT_REASON(SVM_EXIT_WRITE_DR9 , 57, "Write DR9."),
186 EXIT_REASON(SVM_EXIT_WRITE_DR10 , 58, "Write DR10."),
187 EXIT_REASON(SVM_EXIT_WRITE_DR11 , 59, "Write DR11."),
188 EXIT_REASON(SVM_EXIT_WRITE_DR12 , 60, "Write DR12."),
189 EXIT_REASON(SVM_EXIT_WRITE_DR13 , 61, "Write DR13."),
190 EXIT_REASON(SVM_EXIT_WRITE_DR14 , 62, "Write DR14."),
191 EXIT_REASON(SVM_EXIT_WRITE_DR15 , 63, "Write DR15."),
192 EXIT_REASON(SVM_EXIT_EXCEPTION_0 , 64, "Exception Vector 0 (#DE)."),
193 EXIT_REASON(SVM_EXIT_EXCEPTION_1 , 65, "Exception Vector 1 (#DB)."),
194 EXIT_REASON(SVM_EXIT_EXCEPTION_2 , 66, "Exception Vector 2 (#NMI)."),
195 EXIT_REASON(SVM_EXIT_EXCEPTION_3 , 67, "Exception Vector 3 (#BP)."),
196 EXIT_REASON(SVM_EXIT_EXCEPTION_4 , 68, "Exception Vector 4 (#OF)."),
197 EXIT_REASON(SVM_EXIT_EXCEPTION_5 , 69, "Exception Vector 5 (#BR)."),
198 EXIT_REASON(SVM_EXIT_EXCEPTION_6 , 70, "Exception Vector 6 (#UD)."),
199 EXIT_REASON(SVM_EXIT_EXCEPTION_7 , 71, "Exception Vector 7 (#NM)."),
200 EXIT_REASON(SVM_EXIT_EXCEPTION_8 , 72, "Exception Vector 8 (#DF)."),
201 EXIT_REASON(SVM_EXIT_EXCEPTION_9 , 73, "Exception Vector 9 (#CO_SEG_OVERRUN)."),
202 EXIT_REASON(SVM_EXIT_EXCEPTION_A , 74, "Exception Vector 10 (#TS)."),
203 EXIT_REASON(SVM_EXIT_EXCEPTION_B , 75, "Exception Vector 11 (#NP)."),
204 EXIT_REASON(SVM_EXIT_EXCEPTION_C , 76, "Exception Vector 12 (#SS)."),
205 EXIT_REASON(SVM_EXIT_EXCEPTION_D , 77, "Exception Vector 13 (#GP)."),
206 EXIT_REASON(SVM_EXIT_EXCEPTION_E , 78, "Exception Vector 14 (#PF)."),
207 EXIT_REASON(SVM_EXIT_EXCEPTION_F , 79, "Exception Vector 15 (0x0f)."),
208 EXIT_REASON(SVM_EXIT_EXCEPTION_10 , 80, "Exception Vector 16 (#MF)."),
209 EXIT_REASON(SVM_EXIT_EXCEPTION_11 , 81, "Exception Vector 17 (#AC)."),
210 EXIT_REASON(SVM_EXIT_EXCEPTION_12 , 82, "Exception Vector 18 (#MC)."),
211 EXIT_REASON(SVM_EXIT_EXCEPTION_13 , 83, "Exception Vector 19 (#XF)."),
212 EXIT_REASON(SVM_EXIT_EXCEPTION_14 , 84, "Exception Vector 20 (0x14)."),
213 EXIT_REASON(SVM_EXIT_EXCEPTION_15 , 85, "Exception Vector 21 (0x15)."),
214 EXIT_REASON(SVM_EXIT_EXCEPTION_16 , 86, "Exception Vector 22 (0x16)."),
215 EXIT_REASON(SVM_EXIT_EXCEPTION_17 , 87, "Exception Vector 23 (0x17)."),
216 EXIT_REASON(SVM_EXIT_EXCEPTION_18 , 88, "Exception Vector 24 (0x18)."),
217 EXIT_REASON(SVM_EXIT_EXCEPTION_19 , 89, "Exception Vector 25 (0x19)."),
218 EXIT_REASON(SVM_EXIT_EXCEPTION_1A , 90, "Exception Vector 26 (0x1A)."),
219 EXIT_REASON(SVM_EXIT_EXCEPTION_1B , 91, "Exception Vector 27 (0x1B)."),
220 EXIT_REASON(SVM_EXIT_EXCEPTION_1C , 92, "Exception Vector 28 (0x1C)."),
221 EXIT_REASON(SVM_EXIT_EXCEPTION_1D , 93, "Exception Vector 29 (0x1D)."),
222 EXIT_REASON(SVM_EXIT_EXCEPTION_1E , 94, "Exception Vector 30 (0x1E)."),
223 EXIT_REASON(SVM_EXIT_EXCEPTION_1F , 95, "Exception Vector 31 (0x1F)."),
224 EXIT_REASON(SVM_EXIT_INTR , 96, "Physical maskable interrupt (host)."),
225 EXIT_REASON(SVM_EXIT_NMI , 97, "Physical non-maskable interrupt (host)."),
226 EXIT_REASON(SVM_EXIT_SMI , 98, "System management interrupt (host)."),
227 EXIT_REASON(SVM_EXIT_INIT , 99, "Physical INIT signal (host)."),
228 EXIT_REASON(SVM_EXIT_VINTR ,100, "Virtual interrupt-window exit."),
229 EXIT_REASON(SVM_EXIT_CR0_SEL_WRITE ,101, "Write to CR0 that changed any bits other than CR0.TS or CR0.MP."),
230 EXIT_REASON(SVM_EXIT_IDTR_READ ,102, "Read IDTR."),
231 EXIT_REASON(SVM_EXIT_GDTR_READ ,103, "Read GDTR."),
232 EXIT_REASON(SVM_EXIT_LDTR_READ ,104, "Read LDTR."),
233 EXIT_REASON(SVM_EXIT_TR_READ ,105, "Read TR."),
234 EXIT_REASON(SVM_EXIT_IDTR_WRITE ,106, "Write IDTR."),
235 EXIT_REASON(SVM_EXIT_GDTR_WRITE ,107, "Write GDTR."),
236 EXIT_REASON(SVM_EXIT_LDTR_WRITE ,108, "Write LDTR."),
237 EXIT_REASON(SVM_EXIT_TR_WRITE ,109, "Write TR."),
238 EXIT_REASON(SVM_EXIT_RDTSC ,110, "RDTSC instruction."),
239 EXIT_REASON(SVM_EXIT_RDPMC ,111, "RDPMC instruction."),
240 EXIT_REASON(SVM_EXIT_PUSHF ,112, "PUSHF instruction."),
241 EXIT_REASON(SVM_EXIT_POPF ,113, "POPF instruction."),
242 EXIT_REASON(SVM_EXIT_CPUID ,114, "CPUID instruction."),
243 EXIT_REASON(SVM_EXIT_RSM ,115, "RSM instruction."),
244 EXIT_REASON(SVM_EXIT_IRET ,116, "IRET instruction."),
245 EXIT_REASON(SVM_EXIT_SWINT ,117, "Software interrupt (INTn instructions)."),
246 EXIT_REASON(SVM_EXIT_INVD ,118, "INVD instruction."),
247 EXIT_REASON(SVM_EXIT_PAUSE ,119, "PAUSE instruction."),
248 EXIT_REASON(SVM_EXIT_HLT ,120, "HLT instruction."),
249 EXIT_REASON(SVM_EXIT_INVLPG ,121, "INVLPG instruction."),
250 EXIT_REASON(SVM_EXIT_INVLPGA ,122, "INVLPGA instruction."),
251 EXIT_REASON(SVM_EXIT_IOIO ,123, "IN/OUT accessing protected port."),
252 EXIT_REASON(SVM_EXIT_MSR ,124, "RDMSR or WRMSR access to protected MSR."),
253 EXIT_REASON(SVM_EXIT_TASK_SWITCH ,125, "Task switch."),
254 EXIT_REASON(SVM_EXIT_FERR_FREEZE ,126, "Legacy FPU handling enabled; processor is frozen in an x87/MMX instruction waiting for an interrupt."),
255 EXIT_REASON(SVM_EXIT_SHUTDOWN ,127, "Shutdown."),
256 EXIT_REASON(SVM_EXIT_VMRUN ,128, "VMRUN instruction."),
257 EXIT_REASON(SVM_EXIT_VMMCALL ,129, "VMMCALL instruction."),
258 EXIT_REASON(SVM_EXIT_VMLOAD ,130, "VMLOAD instruction."),
259 EXIT_REASON(SVM_EXIT_VMSAVE ,131, "VMSAVE instruction."),
260 EXIT_REASON(SVM_EXIT_STGI ,132, "STGI instruction."),
261 EXIT_REASON(SVM_EXIT_CLGI ,133, "CLGI instruction."),
262 EXIT_REASON(SVM_EXIT_SKINIT ,134, "SKINIT instruction."),
263 EXIT_REASON(SVM_EXIT_RDTSCP ,135, "RDTSCP instruction."),
264 EXIT_REASON(SVM_EXIT_ICEBP ,136, "ICEBP instruction."),
265 EXIT_REASON(SVM_EXIT_WBINVD ,137, "WBINVD instruction."),
266 EXIT_REASON(SVM_EXIT_MONITOR ,138, "MONITOR instruction."),
267 EXIT_REASON(SVM_EXIT_MWAIT ,139, "MWAIT instruction."),
268 EXIT_REASON(SVM_EXIT_MWAIT_ARMED ,140, "MWAIT instruction when armed."),
269 EXIT_REASON(SVM_EXIT_NPF ,1024, "Nested paging fault."),
270 EXIT_REASON_NIL()
271};
272# undef EXIT_REASON
273# undef EXIT_REASON_NIL
274#endif /* VBOX_WITH_STATISTICS */
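
/*
 * Illustrative sketch (editor addition, not part of the original file): the
 * tables above are indexed directly by the exit-reason code, with NULL
 * entries marking reserved codes.  A lookup helper along these lines is
 * assumed here purely for illustration:
 *
 *     static const char *hmExitReasonToString(bool fIsVTx, unsigned uExitReason)
 *     {
 *         const char * const *papszDesc = fIsVTx ? &g_apszVTxExitReasons[0]
 *                                                : &g_apszAmdVExitReasons[0];
 *         if (   uExitReason < MAX_EXITREASON_STAT
 *             && papszDesc[uExitReason])
 *             return papszDesc[uExitReason];
 *         return "Unknown/reserved exit reason";
 *     }
 */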
275
276#define HMVMX_REPORT_FEATURE(allowed1, disallowed0, featflag) \
277 do { \
278 if ((allowed1) & (featflag)) \
279 LogRel(("HM: " #featflag "\n")); \
280 else \
281 LogRel(("HM: " #featflag " *must* be cleared\n")); \
282 if ((disallowed0) & (featflag)) \
283 LogRel(("HM: " #featflag " *must* be set\n")); \
284 } while (0)
285
286#define HMVMX_REPORT_ALLOWED_FEATURE(allowed1, featflag) \
287 do { \
288 if ((allowed1) & (featflag)) \
289 LogRel(("HM: " #featflag "\n")); \
290 else \
291 LogRel(("HM: " #featflag " not supported\n")); \
292 } while (0)
293
294#define HMVMX_REPORT_CAPABILITY(msrcaps, cap) \
295 do { \
296 if ((msrcaps) & (cap)) \
297 LogRel(("HM: " #cap "\n")); \
298 } while (0)
299
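/*
 * Editor note (not part of the original file): for each VMX control MSR the
 * hardware reports a pair of masks -- 'allowed1' (bits that may be set) and
 * 'disallowed0' (bits that must be set).  A feature is thus usable iff its
 * bit is set in allowed1, and mandatory iff it is also set in disallowed0,
 * which is exactly what HMVMX_REPORT_FEATURE logs above.  A minimal sketch:
 *
 *     bool fCanUse  = RT_BOOL(allowed1    & VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
 *     bool fMustUse = RT_BOOL(disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
 */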
300
301/*******************************************************************************
302* Internal Functions *
303*******************************************************************************/
304static DECLCALLBACK(int) hmR3Save(PVM pVM, PSSMHANDLE pSSM);
305static DECLCALLBACK(int) hmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
306static int hmR3InitCPU(PVM pVM);
307static int hmR3InitFinalizeR0(PVM pVM);
308static int hmR3InitFinalizeR0Intel(PVM pVM);
309static int hmR3InitFinalizeR0Amd(PVM pVM);
310static int hmR3TermCPU(PVM pVM);
311
312
313
314/**
315 * Initializes the HM.
316 *
317 * This reads the config and checks whether VT-x or AMD-V hardware is available
318 * if configured to use it. This is one of the very first components to be
319 * initialized after CFGM, so that we can fall back to raw-mode early in the
320 * initialization process.
321 *
322 * Note that a lot of the setup work is done in ring-0 and thus postponed until
323 * the ring-3 and ring-0 callbacks to HMR3InitCompleted.
324 *
325 * @returns VBox status code.
326 * @param pVM Pointer to the VM.
327 *
328 * @remarks Be careful with what we call here, since most of the VMM components
329 * are uninitialized.
330 */
331VMMR3_INT_DECL(int) HMR3Init(PVM pVM)
332{
333 LogFlow(("HMR3Init\n"));
334
335 /*
336 * Assert alignment and sizes.
337 */
338 AssertCompileMemberAlignment(VM, hm.s, 32);
339 AssertCompile(sizeof(pVM->hm.s) <= sizeof(pVM->hm.padding));
340
341 /*
342 * Register the saved state data unit.
343 */
344 int rc = SSMR3RegisterInternal(pVM, "HWACCM", 0, HM_SSM_VERSION, sizeof(HM),
345 NULL, NULL, NULL,
346 NULL, hmR3Save, NULL,
347 NULL, hmR3Load, NULL);
348 if (RT_FAILURE(rc))
349 return rc;
350
351 /*
352 * Misc initialization.
353 */
354 //pVM->hm.s.vmx.fSupported = false;
355 //pVM->hm.s.svm.fSupported = false;
356 //pVM->hm.s.vmx.fEnabled = false;
357 //pVM->hm.s.svm.fEnabled = false;
358 //pVM->hm.s.fNestedPaging = false;
359
360
361 /*
362 * Read configuration.
363 */
364 PCFGMNODE pCfgHM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "HM/");
365
366 /** @cfgm{/HM/HMForced, bool, false}
367 * Forces hardware virtualization, no falling back on raw-mode. HM must be
368 * enabled, i.e. /HMEnabled must be true. */
369 bool fHMForced;
370#ifdef VBOX_WITH_RAW_MODE
371 rc = CFGMR3QueryBoolDef(pCfgHM, "HMForced", &fHMForced, false);
372 AssertRCReturn(rc, rc);
373 AssertLogRelMsgReturn(!fHMForced || pVM->fHMEnabled, ("Configuration error: HM forced but not enabled!\n"),
374 VERR_INVALID_PARAMETER);
375# if defined(RT_OS_DARWIN)
376 if (pVM->fHMEnabled)
377 fHMForced = true;
378# endif
379 AssertLogRelMsgReturn(pVM->cCpus == 1 || pVM->fHMEnabled, ("Configuration error: SMP requires HM to be enabled!\n"),
380 VERR_INVALID_PARAMETER);
381 if (pVM->cCpus > 1)
382 fHMForced = true;
383#else /* !VBOX_WITH_RAW_MODE */
384 AssertRelease(pVM->fHMEnabled);
385 fHMForced = true;
386#endif /* !VBOX_WITH_RAW_MODE */
387
388 /** @cfgm{/HM/EnableNestedPaging, bool, false}
389 * Enables nested paging (aka extended page tables). */
390 rc = CFGMR3QueryBoolDef(pCfgHM, "EnableNestedPaging", &pVM->hm.s.fAllowNestedPaging, false);
391 AssertRCReturn(rc, rc);
392
393 /** @cfgm{/HM/EnableUX, bool, true}
394 * Enables the VT-x unrestricted execution feature. */
395 rc = CFGMR3QueryBoolDef(pCfgHM, "EnableUX", &pVM->hm.s.vmx.fAllowUnrestricted, true);
396 AssertRCReturn(rc, rc);
397
398 /** @cfgm{/HM/EnableLargePages, bool, false}
399 * Enables using large pages (2 MB) for guest memory, thus saving on (nested)
400 * page table walking and maybe better TLB hit rate in some cases. */
401 rc = CFGMR3QueryBoolDef(pCfgHM, "EnableLargePages", &pVM->hm.s.fLargePages, false);
402 AssertRCReturn(rc, rc);
403
404 /** @cfgm{/HM/EnableVPID, bool, false}
405 * Enables the VT-x VPID feature. */
406 rc = CFGMR3QueryBoolDef(pCfgHM, "EnableVPID", &pVM->hm.s.vmx.fAllowVpid, false);
407 AssertRCReturn(rc, rc);
408
409 /** @cfgm{/HM/TPRPatchingEnabled, bool, false}
410 * Enables TPR patching for 32-bit windows guests with IO-APIC. */
411 rc = CFGMR3QueryBoolDef(pCfgHM, "TPRPatchingEnabled", &pVM->hm.s.fTRPPatchingAllowed, false);
412 AssertRCReturn(rc, rc);
413
414 /** @cfgm{/HM/64bitEnabled, bool, 32-bit:false, 64-bit:true}
415 * Enables AMD64 cpu features.
416 * On 32-bit hosts this isn't the default and requires host CPU support.
417 * 64-bit hosts already have the support. */
418#ifdef VBOX_ENABLE_64_BITS_GUESTS
419 rc = CFGMR3QueryBoolDef(pCfgHM, "64bitEnabled", &pVM->hm.s.fAllow64BitGuests, HC_ARCH_BITS == 64);
420 AssertLogRelRCReturn(rc, rc);
421#else
422 pVM->hm.s.fAllow64BitGuests = false;
423#endif
424
425 /** @cfgm{/HM/Exclusive, bool}
426 * Determines the init method for AMD-V and VT-x. If set to true, HM will do a
427 * global init for each host CPU. If false, we do local init each time we wish
428 * to execute guest code.
429 *
430 * Default is false for Mac OS X and Windows due to the higher risk of conflicts
431 * with other hypervisors.
432 */
433 rc = CFGMR3QueryBoolDef(pCfgHM, "Exclusive", &pVM->hm.s.fGlobalInit,
434#if defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS)
435 false
436#else
437 true
438#endif
439 );
440 AssertLogRelRCReturn(rc, rc);
441
442 /** @cfgm{/HM/MaxResumeLoops, uint32_t}
443 * The number of times to resume guest execution before we forcibly return to
444 * ring-3. The return value of RTThreadPreemptIsPendingTrusty in ring-0
445 * determines the default value. */
446 rc = CFGMR3QueryU32Def(pCfgHM, "MaxResumeLoops", &pVM->hm.s.cMaxResumeLoops, 0 /* set by R0 later */);
447 AssertLogRelRCReturn(rc, rc);
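
/*
 * Usage note (editor addition, not part of the original file): these CFGM
 * keys live under the VM's /HM/ node and can typically be set from the host
 * shell through the VBoxInternal extradata prefix, e.g.:
 *
 *     VBoxManage setextradata "MyVM" "VBoxInternal/HM/EnableLargePages" 1
 *
 * This assumes the standard extradata-to-CFGM mapping; the key names are the
 * ones queried above.
 */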
448
449 /*
450 * Check for VT-x or AMD-V support according to the user's wishes.
451 */
452 /** @todo SUPR3QueryVTCaps won't catch VERR_VMX_IN_VMX_ROOT_MODE or
453 * VERR_SVM_IN_USE. */
454 if (pVM->fHMEnabled)
455 {
456 uint32_t fCaps;
457 rc = SUPR3QueryVTCaps(&fCaps);
458 if (RT_SUCCESS(rc))
459 {
460 if (fCaps & SUPVTCAPS_AMD_V)
461 LogRel(("HMR3Init: AMD-V%s\n", fCaps & SUPVTCAPS_NESTED_PAGING ? " w/ nested paging" : ""));
462 else if (fCaps & SUPVTCAPS_VT_X)
463 {
464 rc = SUPR3QueryVTxSupported();
465 if (RT_SUCCESS(rc))
466 LogRel(("HMR3Init: VT-x%s\n", fCaps & SUPVTCAPS_NESTED_PAGING ? " w/ nested paging" : ""));
467 else
468 {
469#ifdef RT_OS_LINUX
470 const char *pszMinReq = " Linux 2.6.13 or newer required!";
471#else
472 const char *pszMinReq = "";
473#endif
474 if (fHMForced)
475 return VMSetError(pVM, rc, RT_SRC_POS, "The host kernel does not support VT-x.%s\n", pszMinReq);
476
477 /* Fall back to raw-mode. */
478 LogRel(("HMR3Init: Falling back to raw-mode: The host kernel does not support VT-x.%s\n", pszMinReq));
479 pVM->fHMEnabled = false;
480 }
481 }
482 else
483 AssertLogRelMsgFailedReturn(("SUPR3QueryVTCaps didn't return either AMD-V or VT-x flag set (%#x)!\n", fCaps),
484 VERR_INTERNAL_ERROR_5);
485
486 /*
487 * Do we require a little bit of raw-mode for 64-bit guest execution?
488 */
489 pVM->fHMNeedRawModeCtx = HC_ARCH_BITS == 32
490 && pVM->fHMEnabled
491 && pVM->hm.s.fAllow64BitGuests;
492 }
493 else
494 {
495 const char *pszMsg;
496 switch (rc)
497 {
498 case VERR_UNSUPPORTED_CPU:
499 pszMsg = "Unknown CPU, VT-x or AMD-v features cannot be ascertained.";
500 break;
501
502 case VERR_VMX_NO_VMX:
503 pszMsg = "VT-x is not available.";
504 break;
505
506 case VERR_VMX_MSR_LOCKED_OR_DISABLED:
507 pszMsg = "VT-x is disabled in the BIOS (or by the host OS).";
508 break;
509
510 case VERR_SVM_NO_SVM:
511 pszMsg = "AMD-V is not available.";
512 break;
513
514 case VERR_SVM_DISABLED:
515 pszMsg = "AMD-V is disabled in the BIOS (or by the host OS).";
516 break;
517
518 default:
519 pszMsg = NULL;
520 break;
521 }
522 if (fHMForced && pszMsg)
523 return VM_SET_ERROR(pVM, rc, pszMsg);
524 if (!pszMsg)
525 return VMSetError(pVM, rc, RT_SRC_POS, "SUPR3QueryVTCaps failed with %Rrc", rc);
526
527 /* Fall back to raw-mode. */
528 LogRel(("HMR3Init: Falling back to raw-mode: %s\n", pszMsg));
529 pVM->fHMEnabled = false;
530 }
531 }
532
533 /* It's now OK to use the predicate function. */
534 pVM->fHMEnabledFixed = true;
535 return VINF_SUCCESS;
536}
537
538
539/**
540 * Initializes the per-VCPU HM.
541 *
542 * @returns VBox status code.
543 * @param pVM Pointer to the VM.
544 */
545static int hmR3InitCPU(PVM pVM)
546{
547 LogFlow(("HMR3InitCPU\n"));
548
549 if (!HMIsEnabled(pVM))
550 return VINF_SUCCESS;
551
552 for (VMCPUID i = 0; i < pVM->cCpus; i++)
553 {
554 PVMCPU pVCpu = &pVM->aCpus[i];
555 pVCpu->hm.s.fActive = false;
556 }
557
558#ifdef VBOX_WITH_STATISTICS
559 STAM_REG(pVM, &pVM->hm.s.StatTprPatchSuccess, STAMTYPE_COUNTER, "/HM/TPR/Patch/Success", STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched.");
560 STAM_REG(pVM, &pVM->hm.s.StatTprPatchFailure, STAMTYPE_COUNTER, "/HM/TPR/Patch/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts.");
561 STAM_REG(pVM, &pVM->hm.s.StatTprReplaceSuccess, STAMTYPE_COUNTER, "/HM/TPR/Replace/Success",STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched.");
562 STAM_REG(pVM, &pVM->hm.s.StatTprReplaceFailure, STAMTYPE_COUNTER, "/HM/TPR/Replace/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts.");
563
564 /*
565 * Statistics.
566 */
567 for (VMCPUID i = 0; i < pVM->cCpus; i++)
568 {
569 PVMCPU pVCpu = &pVM->aCpus[i];
570 int rc;
571
572 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
573 "Profiling of RTMpPokeCpu",
574 "/PROF/CPU%d/HM/Poke", i);
575 AssertRC(rc);
576 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatSpinPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
577 "Profiling of poke wait",
578 "/PROF/CPU%d/HM/PokeWait", i);
579 AssertRC(rc);
580 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatSpinPokeFailed, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
581 "Profiling of poke wait when RTMpPokeCpu fails",
582 "/PROF/CPU%d/HM/PokeWaitFailed", i);
583 AssertRC(rc);
584 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatEntry, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
585 "Profiling of VMXR0RunGuestCode entry",
586 "/PROF/CPU%d/HM/StatEntry", i);
587 AssertRC(rc);
588 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExit1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
589 "Profiling of VMXR0RunGuestCode exit part 1",
590 "/PROF/CPU%d/HM/SwitchFromGC_1", i);
591 AssertRC(rc);
592 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExit2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
593 "Profiling of VMXR0RunGuestCode exit part 2",
594 "/PROF/CPU%d/HM/SwitchFromGC_2", i);
595 AssertRC(rc);
596
597 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitIO, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
598 "I/O",
599 "/PROF/CPU%d/HM/SwitchFromGC_2/IO", i);
600 AssertRC(rc);
601 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitMovCRx, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
602 "MOV CRx",
603 "/PROF/CPU%d/HM/SwitchFromGC_2/MovCRx", i);
604 AssertRC(rc);
605 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitXcptNmi, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
606 "Exceptions, NMIs",
607 "/PROF/CPU%d/HM/SwitchFromGC_2/XcptNmi", i);
608 AssertRC(rc);
609
610 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatLoadGuestState, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
611 "Profiling of VMXR0LoadGuestState",
612 "/PROF/CPU%d/HM/StatLoadGuestState", i);
613 AssertRC(rc);
614 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatInGC, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
615 "Profiling of VMLAUNCH/VMRESUME.",
616 "/PROF/CPU%d/HM/InGC", i);
617 AssertRC(rc);
618
619# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
620 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatWorldSwitch3264, STAMTYPE_PROFILE, STAMVISIBILITY_USED,
621 STAMUNIT_TICKS_PER_CALL, "Profiling of the 32/64 switcher.",
622 "/PROF/CPU%d/HM/Switcher3264", i);
623 AssertRC(rc);
624# endif
625
626# ifdef HM_PROFILE_EXIT_DISPATCH
627 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitDispatch, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_USED,
628 STAMUNIT_TICKS_PER_CALL, "Profiling the dispatching of exit handlers.",
629 "/PROF/CPU%d/HM/ExitDispatch", i);
630 AssertRC(rc);
631# endif
632
633# define HM_REG_COUNTER(a, b, desc) \
634 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, desc, b, i); \
635 AssertRC(rc);
636
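/*
 * Editor note (not part of the original file): each HM_REG_COUNTER use below
 * formats the per-VCPU index 'i' into the STAM name, so for example
 *
 *     HM_REG_COUNTER(&pVCpu->hm.s.StatExitHlt, "/HM/CPU%d/Exit/Instr/Hlt", "Guest attempted to execute HLT.");
 *
 * registers "/HM/CPU0/Exit/Instr/Hlt" for VCPU 0, "/HM/CPU1/Exit/Instr/Hlt"
 * for VCPU 1, and so on.
 */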
637 HM_REG_COUNTER(&pVCpu->hm.s.StatExitShadowNM, "/HM/CPU%d/Exit/Trap/Shw/#NM", "Shadow #NM (device not available, no math co-processor) exception.");
638 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestNM, "/HM/CPU%d/Exit/Trap/Gst/#NM", "Guest #NM (device not available, no math co-processor) exception.");
639 HM_REG_COUNTER(&pVCpu->hm.s.StatExitShadowPF, "/HM/CPU%d/Exit/Trap/Shw/#PF", "Shadow #PF (page fault) exception.");
640 HM_REG_COUNTER(&pVCpu->hm.s.StatExitShadowPFEM, "/HM/CPU%d/Exit/Trap/Shw/#PF-EM", "#PF (page fault) exception going back to ring-3 for emulating the instruction.");
641 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestPF, "/HM/CPU%d/Exit/Trap/Gst/#PF", "Guest #PF (page fault) exception.");
642 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestUD, "/HM/CPU%d/Exit/Trap/Gst/#UD", "Guest #UD (undefined opcode) exception.");
643 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestSS, "/HM/CPU%d/Exit/Trap/Gst/#SS", "Guest #SS (stack-segment fault) exception.");
644 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestNP, "/HM/CPU%d/Exit/Trap/Gst/#NP", "Guest #NP (segment not present) exception.");
645 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestGP, "/HM/CPU%d/Exit/Trap/Gst/#GP", "Guest #GP (general protection) execption.");
646 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestMF, "/HM/CPU%d/Exit/Trap/Gst/#MF", "Guest #MF (x87 FPU error, math fault) exception.");
647 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestDE, "/HM/CPU%d/Exit/Trap/Gst/#DE", "Guest #DE (divide error) exception.");
648 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestDB, "/HM/CPU%d/Exit/Trap/Gst/#DB", "Guest #DB (debug) exception.");
649 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestBP, "/HM/CPU%d/Exit/Trap/Gst/#BP", "Guest #BP (breakpoint) exception.");
650 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestXF, "/HM/CPU%d/Exit/Trap/Gst/#XF", "Guest #XF (extended math fault, SIMD FPU) exception.");
651 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestXcpUnk, "/HM/CPU%d/Exit/Trap/Gst/Other", "Other guest exceptions.");
652 HM_REG_COUNTER(&pVCpu->hm.s.StatExitInvlpg, "/HM/CPU%d/Exit/Instr/Invlpg", "Guest attempted to execute INVLPG.");
653 HM_REG_COUNTER(&pVCpu->hm.s.StatExitInvd, "/HM/CPU%d/Exit/Instr/Invd", "Guest attempted to execute INVD.");
654 HM_REG_COUNTER(&pVCpu->hm.s.StatExitWbinvd, "/HM/CPU%d/Exit/Instr/Wbinvd", "Guest attempted to execute WBINVD.");
655 HM_REG_COUNTER(&pVCpu->hm.s.StatExitPause, "/HM/CPU%d/Exit/Instr/Pause", "Guest attempted to execute PAUSE.");
656 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCpuid, "/HM/CPU%d/Exit/Instr/Cpuid", "Guest attempted to execute CPUID.");
657 HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdtsc, "/HM/CPU%d/Exit/Instr/Rdtsc", "Guest attempted to execute RDTSC.");
658 HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdtscp, "/HM/CPU%d/Exit/Instr/Rdtscp", "Guest attempted to execute RDTSCP.");
659 HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdpmc, "/HM/CPU%d/Exit/Instr/Rdpmc", "Guest attempted to execute RDPMC.");
660 HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdrand, "/HM/CPU%d/Exit/Instr/Rdrand", "Guest attempted to execute RDRAND.");
661 HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdmsr, "/HM/CPU%d/Exit/Instr/Rdmsr", "Guest attempted to execute RDMSR.");
662 HM_REG_COUNTER(&pVCpu->hm.s.StatExitWrmsr, "/HM/CPU%d/Exit/Instr/Wrmsr", "Guest attempted to execute WRMSR.");
663 HM_REG_COUNTER(&pVCpu->hm.s.StatExitMwait, "/HM/CPU%d/Exit/Instr/Mwait", "Guest attempted to execute MWAIT.");
664 HM_REG_COUNTER(&pVCpu->hm.s.StatExitMonitor, "/HM/CPU%d/Exit/Instr/Monitor", "Guest attempted to execute MONITOR.");
665 HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxWrite, "/HM/CPU%d/Exit/Instr/DR/Write", "Guest attempted to write a debug register.");
666 HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxRead, "/HM/CPU%d/Exit/Instr/DR/Read", "Guest attempted to read a debug register.");
667 HM_REG_COUNTER(&pVCpu->hm.s.StatExitClts, "/HM/CPU%d/Exit/Instr/CLTS", "Guest attempted to execute CLTS.");
668 HM_REG_COUNTER(&pVCpu->hm.s.StatExitLmsw, "/HM/CPU%d/Exit/Instr/LMSW", "Guest attempted to execute LMSW.");
669 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCli, "/HM/CPU%d/Exit/Instr/Cli", "Guest attempted to execute CLI.");
670 HM_REG_COUNTER(&pVCpu->hm.s.StatExitSti, "/HM/CPU%d/Exit/Instr/Sti", "Guest attempted to execute STI.");
671 HM_REG_COUNTER(&pVCpu->hm.s.StatExitPushf, "/HM/CPU%d/Exit/Instr/Pushf", "Guest attempted to execute PUSHF.");
672 HM_REG_COUNTER(&pVCpu->hm.s.StatExitPopf, "/HM/CPU%d/Exit/Instr/Popf", "Guest attempted to execute POPF.");
673 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIret, "/HM/CPU%d/Exit/Instr/Iret", "Guest attempted to execute IRET.");
674 HM_REG_COUNTER(&pVCpu->hm.s.StatExitInt, "/HM/CPU%d/Exit/Instr/Int", "Guest attempted to execute INT.");
675 HM_REG_COUNTER(&pVCpu->hm.s.StatExitHlt, "/HM/CPU%d/Exit/Instr/Hlt", "Guest attempted to execute HLT.");
676 HM_REG_COUNTER(&pVCpu->hm.s.StatExitXdtrAccess, "/HM/CPU%d/Exit/Instr/XdtrAccess", "Guest attempted to access descriptor table register (GDTR, IDTR, LDTR).");
677 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOWrite, "/HM/CPU%d/Exit/IO/Write", "I/O write.");
678 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIORead, "/HM/CPU%d/Exit/IO/Read", "I/O read.");
679 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringWrite, "/HM/CPU%d/Exit/IO/WriteString", "String I/O write.");
680 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringRead, "/HM/CPU%d/Exit/IO/ReadString", "String I/O read.");
681 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIntWindow, "/HM/CPU%d/Exit/IntWindow", "Interrupt-window exit. Guest is ready to receive interrupts again.");
682 HM_REG_COUNTER(&pVCpu->hm.s.StatExitMaxResume, "/HM/CPU%d/Exit/MaxResume", "Maximum VMRESUME inner-loop counter reached.");
683 HM_REG_COUNTER(&pVCpu->hm.s.StatExitExtInt, "/HM/CPU%d/Exit/ExtInt", "Host interrupt received.");
684 HM_REG_COUNTER(&pVCpu->hm.s.StatExitHostNmi, "/HM/CPU%d/Exit/HostNmi", "Host NMI received.");
685 HM_REG_COUNTER(&pVCpu->hm.s.StatExitPreemptTimer, "/HM/CPU%d/Exit/PreemptTimer", "VMX-preemption timer expired.");
686 HM_REG_COUNTER(&pVCpu->hm.s.StatExitTprBelowThreshold, "/HM/CPU%d/Exit/TprBelowThreshold", "TPR lowered below threshold by the guest.");
687 HM_REG_COUNTER(&pVCpu->hm.s.StatExitTaskSwitch, "/HM/CPU%d/Exit/TaskSwitch", "Guest attempted a task switch.");
688 HM_REG_COUNTER(&pVCpu->hm.s.StatExitMtf, "/HM/CPU%d/Exit/MonitorTrapFlag", "Monitor Trap Flag.");
689 HM_REG_COUNTER(&pVCpu->hm.s.StatExitApicAccess, "/HM/CPU%d/Exit/ApicAccess", "APIC access. Guest attempted to access memory at a physical address on the APIC-access page.");
690
691 HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchGuestIrq, "/HM/CPU%d/Switch/IrqPending", "PDMGetInterrupt() cleared behind our back!?!.");
692 HM_REG_COUNTER(&pVCpu->hm.s.StatPendingHostIrq, "/HM/CPU%d/Switch/PendingHostIrq", "Exit to ring-3 due to pending host interrupt before executing guest code.");
693 HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchHmToR3FF, "/HM/CPU%d/Switch/HmToR3FF", "Exit to ring-3 due to pending timers, EMT rendezvous, critical section etc.");
694 HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchExitToR3, "/HM/CPU%d/Switch/ExitToR3", "Exit to ring-3 (total).");
695 HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchLongJmpToR3, "/HM/CPU%d/Switch/LongJmpToR3", "Longjump to ring-3.");
696
697 HM_REG_COUNTER(&pVCpu->hm.s.StatInjectInterrupt, "/HM/CPU%d/EventInject/Interrupt", "Injected an external interrupt into the guest.");
698 HM_REG_COUNTER(&pVCpu->hm.s.StatInjectXcpt, "/HM/CPU%d/EventInject/Trap", "Injected an exception into the guest.");
699 HM_REG_COUNTER(&pVCpu->hm.s.StatInjectPendingReflect, "/HM/CPU%d/EventInject/PendingReflect", "Reflecting an exception back to the guest.");
700
701 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushPage, "/HM/CPU%d/Flush/Page", "Invalidating a guest page on all guest CPUs.");
702 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushPageManual, "/HM/CPU%d/Flush/Page/Virt", "Invalidating a guest page using guest-virtual address.");
703 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushPhysPageManual, "/HM/CPU%d/Flush/Page/Phys", "Invalidating a guest page using guest-physical address.");
704 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTlb, "/HM/CPU%d/Flush/TLB", "Forcing a full guest-TLB flush (ring-0).");
705 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTlbManual, "/HM/CPU%d/Flush/TLB/Manual", "Request a full guest-TLB flush.");
706 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTlbWorldSwitch, "/HM/CPU%d/Flush/TLB/CpuSwitch", "Forcing a full guest-TLB flush due to host-CPU reschedule or ASID-limit hit by another guest-VCPU.");
707 HM_REG_COUNTER(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch, "/HM/CPU%d/Flush/TLB/Skipped", "No TLB flushing required.");
708 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushEntire, "/HM/CPU%d/Flush/TLB/Entire", "Flush the entire TLB (host + guest).");
709 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushAsid, "/HM/CPU%d/Flush/TLB/ASID", "Flushed guest-TLB entries for the current VPID.");
710 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushNestedPaging, "/HM/CPU%d/Flush/TLB/NestedPaging", "Flushed guest-TLB entries for the current EPT.");
711 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTlbInvlpgVirt, "/HM/CPU%d/Flush/TLB/InvlpgVirt", "Invalidated a guest-TLB entry for a guest-virtual address.");
712 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTlbInvlpgPhys, "/HM/CPU%d/Flush/TLB/InvlpgPhys", "Currently not possible, flushes entire guest-TLB.");
713 HM_REG_COUNTER(&pVCpu->hm.s.StatTlbShootdown, "/HM/CPU%d/Flush/Shootdown/Page", "Inter-VCPU request to flush queued guest page.");
714 HM_REG_COUNTER(&pVCpu->hm.s.StatTlbShootdownFlush, "/HM/CPU%d/Flush/Shootdown/TLB", "Inter-VCPU request to flush entire guest-TLB.");
715
716 HM_REG_COUNTER(&pVCpu->hm.s.StatTscOffset, "/HM/CPU%d/TSC/Offset", "TSC offsetting is in effect.");
717 HM_REG_COUNTER(&pVCpu->hm.s.StatTscIntercept, "/HM/CPU%d/TSC/Intercept", "Guest is in catchup mode, intercept TSC accesses.");
718 HM_REG_COUNTER(&pVCpu->hm.s.StatTscInterceptOverFlow, "/HM/CPU%d/TSC/InterceptOverflow", "TSC offset overflow, fallback to intercept TSC accesses.");
719
720 HM_REG_COUNTER(&pVCpu->hm.s.StatDRxArmed, "/HM/CPU%d/Debug/Armed", "Loaded guest-debug state while loading guest-state.");
721 HM_REG_COUNTER(&pVCpu->hm.s.StatDRxContextSwitch, "/HM/CPU%d/Debug/ContextSwitch", "Loaded guest-debug state on MOV DRx.");
722 HM_REG_COUNTER(&pVCpu->hm.s.StatDRxIoCheck, "/HM/CPU%d/Debug/IOCheck", "Checking for I/O breakpoint.");
723
724 HM_REG_COUNTER(&pVCpu->hm.s.StatLoadMinimal, "/HM/CPU%d/Load/Minimal", "VM-entry loading minimal guest-state.");
725 HM_REG_COUNTER(&pVCpu->hm.s.StatLoadFull, "/HM/CPU%d/Load/Full", "VM-entry loading the full guest-state.");
726
727 HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckBadRmSelBase, "/HM/CPU%d/VMXCheck/RMSelBase", "Could not use VMX due to unsuitable real-mode selector base.");
728 HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckBadRmSelLimit, "/HM/CPU%d/VMXCheck/RMSelLimit", "Could not use VMX due to unsuitable real-mode selector limit.");
729 HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckRmOk, "/HM/CPU%d/VMXCheck/VMX_RM", "VMX execution in real (V86) mode OK.");
730 HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckBadSel, "/HM/CPU%d/VMXCheck/Selector", "Could not use VMX due to unsuitable selector.");
731 HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckBadRpl, "/HM/CPU%d/VMXCheck/RPL", "Could not use VMX due to unsuitable RPL.");
732 HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckBadLdt, "/HM/CPU%d/VMXCheck/LDT", "Could not use VMX due to unsuitable LDT.");
733 HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckBadTr, "/HM/CPU%d/VMXCheck/TR", "Could not use VMX due to unsuitable TR.");
734 HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckPmOk, "/HM/CPU%d/VMXCheck/VMX_PM", "VMX execution in protected mode OK.");
735
736#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
737 HM_REG_COUNTER(&pVCpu->hm.s.StatFpu64SwitchBack, "/HM/CPU%d/Switch64/Fpu", "Saving guest FPU/XMM state.");
738 HM_REG_COUNTER(&pVCpu->hm.s.StatDebug64SwitchBack, "/HM/CPU%d/Switch64/Debug", "Saving guest debug state.");
739#endif
740
741 for (unsigned j = 0; j < RT_ELEMENTS(pVCpu->hm.s.StatExitCRxWrite); j++)
742 {
743 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitCRxWrite[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
744 STAMUNIT_OCCURENCES, "Profiling of CRx writes",
745 "/HM/CPU%d/Exit/Instr/CR/Write/%x", i, j);
746 AssertRC(rc);
747 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitCRxRead[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
748 STAMUNIT_OCCURENCES, "Profiling of CRx reads",
749 "/HM/CPU%d/Exit/Instr/CR/Read/%x", i, j);
750 AssertRC(rc);
751 }
752
753#undef HM_REG_COUNTER
754
755 pVCpu->hm.s.paStatExitReason = NULL;
756
757 rc = MMHyperAlloc(pVM, MAX_EXITREASON_STAT*sizeof(*pVCpu->hm.s.paStatExitReason), 0, MM_TAG_HM,
758 (void **)&pVCpu->hm.s.paStatExitReason);
759 AssertRC(rc);
760 if (RT_SUCCESS(rc))
761 {
762 const char * const *papszDesc = ASMIsIntelCpu() ? &g_apszVTxExitReasons[0] : &g_apszAmdVExitReasons[0];
763 for (int j = 0; j < MAX_EXITREASON_STAT; j++)
764 {
765 if (papszDesc[j])
766 {
767 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.paStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
768 STAMUNIT_OCCURENCES, papszDesc[j], "/HM/CPU%d/Exit/Reason/%02x", i, j);
769 AssertRC(rc);
770 }
771 }
772 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitReasonNpf, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
773 "Nested page fault", "/HM/CPU%d/Exit/Reason/#NPF", i);
774 AssertRC(rc);
775 }
776 pVCpu->hm.s.paStatExitReasonR0 = MMHyperR3ToR0(pVM, pVCpu->hm.s.paStatExitReason);
777# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
778 Assert(pVCpu->hm.s.paStatExitReasonR0 != NIL_RTR0PTR || !HMIsEnabled(pVM));
779# else
780 Assert(pVCpu->hm.s.paStatExitReasonR0 != NIL_RTR0PTR);
781# endif
782
783 rc = MMHyperAlloc(pVM, sizeof(STAMCOUNTER) * 256, 8, MM_TAG_HM, (void **)&pVCpu->hm.s.paStatInjectedIrqs);
784 AssertRCReturn(rc, rc);
785 pVCpu->hm.s.paStatInjectedIrqsR0 = MMHyperR3ToR0(pVM, pVCpu->hm.s.paStatInjectedIrqs);
786# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
787 Assert(pVCpu->hm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR || !HMIsEnabled(pVM));
788# else
789 Assert(pVCpu->hm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR);
790# endif
791 for (unsigned j = 0; j < 255; j++)
792 {
793 STAMR3RegisterF(pVM, &pVCpu->hm.s.paStatInjectedIrqs[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
794 "Injected event.",
795 (j < 0x20) ? "/HM/CPU%d/EventInject/Event/Trap/%02X" : "/HM/CPU%d/EventInject/Event/IRQ/%02X", i, j);
796 }
797
798 }
799#endif /* VBOX_WITH_STATISTICS */
800
801#ifdef VBOX_WITH_CRASHDUMP_MAGIC
802 /*
803 * Magic marker for searching in crash dumps.
804 */
805 for (VMCPUID i = 0; i < pVM->cCpus; i++)
806 {
807 PVMCPU pVCpu = &pVM->aCpus[i];
808
809 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
810 strcpy((char *)pCache->aMagic, "VMCSCACHE Magic");
811 pCache->uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
812 }
813#endif
814
815 return VINF_SUCCESS;
816}
817
818
819/**
820 * Called when a init phase has completed.
821 *
822 * @returns VBox status code.
823 * @param pVM The VM.
824 * @param enmWhat The phase that completed.
825 */
826VMMR3_INT_DECL(int) HMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
827{
828 switch (enmWhat)
829 {
830 case VMINITCOMPLETED_RING3:
831 return hmR3InitCPU(pVM);
832 case VMINITCOMPLETED_RING0:
833 return hmR3InitFinalizeR0(pVM);
834 default:
835 return VINF_SUCCESS;
836 }
837}
838
839
840/**
841 * Turns off normal raw mode features.
842 *
843 * @param pVM Pointer to the VM.
844 */
845static void hmR3DisableRawMode(PVM pVM)
846{
847 /* Reinit the paging mode to force the new shadow mode. */
848 for (VMCPUID i = 0; i < pVM->cCpus; i++)
849 {
850 PVMCPU pVCpu = &pVM->aCpus[i];
851
852 PGMR3ChangeMode(pVM, pVCpu, PGMMODE_REAL);
853 }
854}
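
/* Editor note (not part of the original file): forcing PGMMODE_REAL above
   makes PGM re-derive the shadow paging mode for each VCPU; with HM active
   this selects the hardware-assisted setup (e.g. nested paging/EPT) instead
   of the raw-mode shadow page tables. */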
855
856
857/**
858 * Initialize VT-x or AMD-V.
859 *
860 * @returns VBox status code.
861 * @param pVM Pointer to the VM.
862 */
863static int hmR3InitFinalizeR0(PVM pVM)
864{
865 int rc;
866
867 if (!HMIsEnabled(pVM))
868 return VINF_SUCCESS;
869
870 /*
871 * Hack to allow users to work around broken BIOSes that incorrectly set
872 * EFER.SVME, which makes us believe somebody else is already using AMD-V.
873 */
874 if ( !pVM->hm.s.vmx.fSupported
875 && !pVM->hm.s.svm.fSupported
876 && pVM->hm.s.lLastError == VERR_SVM_IN_USE /* implies functional AMD-V */
877 && RTEnvExist("VBOX_HWVIRTEX_IGNORE_SVM_IN_USE"))
878 {
879 LogRel(("HM: VBOX_HWVIRTEX_IGNORE_SVM_IN_USE active!\n"));
880 pVM->hm.s.svm.fSupported = true;
881 pVM->hm.s.svm.fIgnoreInUseError = true;
882 pVM->hm.s.lLastError = VINF_SUCCESS;
883 }
884
885 /*
886 * Report ring-0 init errors.
887 */
888 if ( !pVM->hm.s.vmx.fSupported
889 && !pVM->hm.s.svm.fSupported)
890 {
891 LogRel(("HM: Failed to initialize VT-x / AMD-V: %Rrc\n", pVM->hm.s.lLastError));
892 LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.vmx.msr.feature_ctrl));
893 switch (pVM->hm.s.lLastError)
894 {
895 case VERR_VMX_IN_VMX_ROOT_MODE:
896 return VM_SET_ERROR(pVM, VERR_VMX_IN_VMX_ROOT_MODE, "VT-x is being used by another hypervisor.");
897 case VERR_VMX_NO_VMX:
898 return VM_SET_ERROR(pVM, VERR_VMX_NO_VMX, "VT-x is not available.");
899 case VERR_VMX_MSR_LOCKED_OR_DISABLED:
900 return VM_SET_ERROR(pVM, VERR_VMX_NO_VMX, "VT-x is disabled in the BIOS (or by the host OS).");
901
902 case VERR_SVM_IN_USE:
903 return VM_SET_ERROR(pVM, VERR_SVM_IN_USE, "AMD-V is being used by another hypervisor.");
904 case VERR_SVM_NO_SVM:
905 return VM_SET_ERROR(pVM, VERR_SVM_NO_SVM, "AMD-V is not available.");
906 case VERR_SVM_DISABLED:
907 return VM_SET_ERROR(pVM, VERR_SVM_DISABLED, "AMD-V is disabled in the BIOS.");
908 }
909 return VMSetError(pVM, pVM->hm.s.lLastError, RT_SRC_POS, "HM ring-0 init failed: %Rrc", pVM->hm.s.lLastError);
910 }
911
912 /*
913 * Enable VT-x or AMD-V on all host CPUs.
914 */
915 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HM_ENABLE, 0, NULL);
916 if (RT_FAILURE(rc))
917 {
918 LogRel(("HMR3InitFinalize: SUPR3CallVMMR0Ex VMMR0_DO_HM_ENABLE failed with %Rrc\n", rc));
919 return rc;
920 }
921
922 /*
923 * No TPR patching is required when the IO-APIC is not enabled for this VM.
924 * (Main should have taken care of this already)
925 */
926 pVM->hm.s.fHasIoApic = PDMHasIoApic(pVM);
927 if (!pVM->hm.s.fHasIoApic)
928 {
929 Assert(!pVM->hm.s.fTRPPatchingAllowed); /* paranoia */
930 pVM->hm.s.fTRPPatchingAllowed = false;
931 }
932
933 /*
934 * Do the vendor-specific initialization.
935 *
936 * Note! We disable release log buffering here since we're doing relatively
937 * a lot of logging and don't want to hit the disk with each LogRel
938 * statement.
939 */
940 AssertLogRelReturn(!pVM->hm.s.fInitialized, VERR_HM_IPE_5);
941 bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
942 if (pVM->hm.s.vmx.fSupported)
943 rc = hmR3InitFinalizeR0Intel(pVM);
944 else
945 rc = hmR3InitFinalizeR0Amd(pVM);
946 LogRel(("HM: VT-x/AMD-V init method: %s\n", (pVM->hm.s.fGlobalInit) ? "GLOBAL" : "LOCAL"));
947 RTLogRelSetBuffering(fOldBuffered);
948 pVM->hm.s.fInitialized = true;
949
950 return rc;
951}
952
953
954/**
955 * Finish VT-x initialization (after ring-0 init).
956 *
957 * @returns VBox status code.
958 * @param pVM The cross context VM structure.
959 */
960static int hmR3InitFinalizeR0Intel(PVM pVM)
961{
962 int rc;
963
964 Log(("pVM->hm.s.vmx.fSupported = %d\n", pVM->hm.s.vmx.fSupported));
965 AssertLogRelReturn(pVM->hm.s.vmx.msr.feature_ctrl != 0, VERR_HM_IPE_4);
966
967 uint64_t val;
968 uint64_t zap;
969 RTGCPHYS GCPhys = 0;
970
971#ifndef VBOX_WITH_OLD_VTX_CODE
972 LogRel(("HM: Using VT-x implementation 2.0!\n"));
973#endif
974 LogRel(("HM: Host CR4 = %08X\n", pVM->hm.s.vmx.hostCR4));
975 LogRel(("HM: MSR_IA32_FEATURE_CONTROL = %RX64\n", pVM->hm.s.vmx.msr.feature_ctrl));
976 LogRel(("HM: MSR_IA32_VMX_BASIC_INFO = %RX64\n", pVM->hm.s.vmx.msr.vmx_basic_info));
977 LogRel(("HM: VMCS id = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info)));
978 LogRel(("HM: VMCS size = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.msr.vmx_basic_info)));
979 LogRel(("HM: VMCS physical address limit = %s\n", MSR_IA32_VMX_BASIC_INFO_VMCS_PHYS_WIDTH(pVM->hm.s.vmx.msr.vmx_basic_info) ? "< 4 GB" : "None"));
980 LogRel(("HM: VMCS memory type = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_MEM_TYPE(pVM->hm.s.vmx.msr.vmx_basic_info)));
981 LogRel(("HM: Dual-monitor treatment = %d\n", MSR_IA32_VMX_BASIC_INFO_VMCS_DUAL_MON(pVM->hm.s.vmx.msr.vmx_basic_info)));
982 LogRel(("HM: Max resume loops = %RX32\n", pVM->hm.s.cMaxResumeLoops));
983
984 LogRel(("HM: MSR_IA32_VMX_PINBASED_CTLS = %RX64\n", pVM->hm.s.vmx.msr.vmx_pin_ctls.u));
985 val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1;
986 zap = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;
987 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT);
988 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT);
989 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI);
990 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
991
992 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS = %RX64\n", pVM->hm.s.vmx.msr.vmx_proc_ctls.u));
993 val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1;
994 zap = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;
995 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
996 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING);
997 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
998 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT);
999 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT);
1000 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT);
1001 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT);
1002 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT);
1003 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT);
1004 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT);
1005 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT);
1006 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW);
1007 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT);
1008 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
1009 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT);
1010 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_USE_IO_BITMAPS);
1011 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG);
1012 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS);
1013 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT);
1014 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT);
1015 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL);
1016 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1017 {
1018 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS2 = %RX64\n", pVM->hm.s.vmx.msr.vmx_proc_ctls2.u));
1019 val = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;
1020 zap = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0;
1021 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC);
1022 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_EPT);
1023 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT);
1024 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP);
1025 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_VIRT_X2APIC);
1026 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_VPID);
1027 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT);
1028 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST);
1029 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT);
1030 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT);
1031 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_INVPCID);
1032 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_VMFUNC);
1033 }
1034
1035 LogRel(("HM: MSR_IA32_VMX_ENTRY_CTLS = %RX64\n", pVM->hm.s.vmx.msr.vmx_entry.u));
1036 val = pVM->hm.s.vmx.msr.vmx_entry.n.allowed1;
1037 zap = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0;
1038 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG);
1039 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST);
1040 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_ENTRY_SMM);
1041 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_DEACTIVATE_DUALMON);
1042 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR);
1043 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR);
1044 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR);
1045
1046 LogRel(("HM: MSR_IA32_VMX_EXIT_CTLS = %RX64\n", pVM->hm.s.vmx.msr.vmx_exit.u));
1047 val = pVM->hm.s.vmx.msr.vmx_exit.n.allowed1;
1048 zap = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0;
1049 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_SAVE_DEBUG);
1050 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE);
1051 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR);
1052 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_ACK_EXT_INT);
1053 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_SAVE_GUEST_PAT_MSR);
1054 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR);
1055 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR);
1056 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR);
1057 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER);
1058
1059 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps)
1060 {
1061 val = pVM->hm.s.vmx.msr.vmx_ept_vpid_caps;
1062 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP = %RX64\n", val));
1063 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_RWX_X_ONLY);
1064 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_RWX_W_ONLY);
1065 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_RWX_WX_ONLY);
1066 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_GAW_21_BITS);
1067 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_GAW_30_BITS);
1068 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_GAW_39_BITS);
1069 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_GAW_48_BITS);
1070 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_GAW_57_BITS);
1071 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_EMT_UC);
1072 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_EMT_WC);
1073 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_EMT_WT);
1074 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_EMT_WP);
1075 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB);
1076 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_SP_21_BITS);
1077 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_SP_30_BITS);
1078 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_SP_39_BITS);
1079 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_SP_48_BITS);
1080 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_INVEPT);
1081 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT);
1082 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS);
1083 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_INVVPID);
1084 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
1085 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT);
1086 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS);
1087 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS);
1088 }
1089
1090 LogRel(("HM: MSR_IA32_VMX_MISC = %RX64\n", pVM->hm.s.vmx.msr.vmx_misc));
1091 if (MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hm.s.vmx.msr.vmx_misc) == pVM->hm.s.vmx.cPreemptTimerShift)
1092 {
1093 LogRel(("HM: MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT = %x\n",
1094 MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hm.s.vmx.msr.vmx_misc)));
1095 }
1096 else
1097 {
1098 LogRel(("HM: MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT = %x - erratum detected, using %x instead\n",
1099 MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hm.s.vmx.msr.vmx_misc), pVM->hm.s.vmx.cPreemptTimerShift));
1100 }
1101
1102 LogRel(("HM: MSR_IA32_VMX_MISC_STORE_EFERLMA_VMEXIT = %x\n", MSR_IA32_VMX_MISC_STORE_EFERLMA_VMEXIT(pVM->hm.s.vmx.msr.vmx_misc)));
1103 LogRel(("HM: MSR_IA32_VMX_MISC_ACTIVITY_STATES = %x\n", MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hm.s.vmx.msr.vmx_misc)));
1104 LogRel(("HM: MSR_IA32_VMX_MISC_CR3_TARGET = %x\n", MSR_IA32_VMX_MISC_CR3_TARGET(pVM->hm.s.vmx.msr.vmx_misc)));
1105 LogRel(("HM: MSR_IA32_VMX_MISC_MAX_MSR = %x\n", MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)));
1106 LogRel(("HM: MSR_IA32_VMX_MISC_RDMSR_SMBASE_MSR_SMM = %x\n", MSR_IA32_VMX_MISC_RDMSR_SMBASE_MSR_SMM(pVM->hm.s.vmx.msr.vmx_misc)));
1107 LogRel(("HM: MSR_IA32_VMX_MISC_SMM_MONITOR_CTL_B2 = %x\n", MSR_IA32_VMX_MISC_SMM_MONITOR_CTL_B2(pVM->hm.s.vmx.msr.vmx_misc)));
1108 LogRel(("HM: MSR_IA32_VMX_MISC_VMWRITE_VMEXIT_INFO = %x\n", MSR_IA32_VMX_MISC_VMWRITE_VMEXIT_INFO(pVM->hm.s.vmx.msr.vmx_misc)));
1109 LogRel(("HM: MSR_IA32_VMX_MISC_MSEG_ID = %x\n", MSR_IA32_VMX_MISC_MSEG_ID(pVM->hm.s.vmx.msr.vmx_misc)));
1110
1111 /* Paranoia */
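/* Bits 27:25 of IA32_VMX_MISC encode N, where the recommended maximum number
   of MSR-load/store list entries is 512 * (N + 1); the decoded value can
   therefore never be below 512. */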
1112 AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc) >= 512);
1113
1114 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED0 = %RX64\n", pVM->hm.s.vmx.msr.vmx_cr0_fixed0));
1115 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED1 = %RX64\n", pVM->hm.s.vmx.msr.vmx_cr0_fixed1));
1116 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED0 = %RX64\n", pVM->hm.s.vmx.msr.vmx_cr4_fixed0));
1117 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED1 = %RX64\n", pVM->hm.s.vmx.msr.vmx_cr4_fixed1));
1118 LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM = %RX64\n", pVM->hm.s.vmx.msr.vmx_vmcs_enum));
1119 LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM_HIGHEST_INDEX = %x\n", MSR_IA32_VMX_VMCS_ENUM_HIGHEST_INDEX(pVM->hm.s.vmx.msr.vmx_vmcs_enum)));
1120
1121 val = pVM->hm.s.vmx.msr.vmx_vmfunc;
1122 if (val)
1123 {
1124 LogRel(("HM: MSR_A32_VMX_VMFUNC = %RX64\n", val));
1125 HMVMX_REPORT_ALLOWED_FEATURE(val, VMX_VMCS_CTRL_VMFUNC_EPTP_SWITCHING);
1126 }
1127
1128 LogRel(("HM: APIC-access page physaddr = %RHp\n", pVM->hm.s.vmx.HCPhysApicAccess));
1129
1130 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1131 {
1132 LogRel(("HM: VCPU%3d: MSR bitmap physaddr = %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMsrBitmap));
1133 LogRel(("HM: VCPU%3d: VMCS physaddr = %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysVmcs));
1134 }
1135
1136 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
1137 pVM->hm.s.fNestedPaging = pVM->hm.s.fAllowNestedPaging;
1138
1139 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
1140 pVM->hm.s.vmx.fVpid = pVM->hm.s.vmx.fAllowVpid;
1141
1142 /*
1143 * Disallow RDTSCP in the guest if there are no secondary processor-based VM-execution controls, as otherwise
1144 * RDTSCP would cause a #UD. There might be no CPUs out there where this happens, as RDTSCP was introduced
1145 * with Nehalem and secondary VM-execution controls should be supported on all of them, but it's Intel nonetheless...
1146 */
1147 if ( !(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1148 && CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP))
1149 {
1150 CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP);
1151 LogRel(("HM: RDTSCP disabled.\n"));
1152 }
1153
1154 /* Unrestricted guest execution also requires EPT. */
1155 if ( pVM->hm.s.vmx.fAllowUnrestricted
1156 && pVM->hm.s.fNestedPaging
1157 && (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST))
1158 {
1159 pVM->hm.s.vmx.fUnrestrictedGuest = true;
1160 }
1161
1162 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
1163 {
1164 /* Allocate four contiguous pages: three for the TSS we need for real mode emulation (two of them for the IO bitmap) and one for the page directory set up below. */
1165 rc = PDMR3VmmDevHeapAlloc(pVM, HM_VTX_TOTAL_DEVHEAP_MEM, (RTR3PTR *)&pVM->hm.s.vmx.pRealModeTSS);
1166 if (RT_SUCCESS(rc))
1167 {
1168 /* The IO bitmap starts right after the virtual interrupt redirection bitmap.
1169 See Intel spec. 20.3.3 "Software Interrupt Handling in Virtual-8086 mode",
1170 esp. Figure 20-5. */
1171 ASMMemZero32(pVM->hm.s.vmx.pRealModeTSS, sizeof(*pVM->hm.s.vmx.pRealModeTSS));
1172 pVM->hm.s.vmx.pRealModeTSS->offIoBitmap = sizeof(*pVM->hm.s.vmx.pRealModeTSS);
1173
1174 /* A bit cleared to 0 means the software interrupt is redirected to the
1175 virtual-8086 program's interrupt handler rather than switching to the
1176 protected-mode handler. */
1177 memset(pVM->hm.s.vmx.pRealModeTSS->IntRedirBitmap, 0, sizeof(pVM->hm.s.vmx.pRealModeTSS->IntRedirBitmap));
1178
1179 /* Allow all port IO, so that port IO instructions do not cause
1180 exceptions and instead cause a VM-exit (based on VT-x's
1181 IO bitmap which we currently configure to always cause an exit). */
1182 memset(pVM->hm.s.vmx.pRealModeTSS + 1, 0, PAGE_SIZE * 2);
1183 *((unsigned char *)pVM->hm.s.vmx.pRealModeTSS + HM_VTX_TSS_SIZE - 2) = 0xff;
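/* Note: the CPU requires the byte at the end of the I/O bitmap to have all
   bits set (I/O permission checks may touch two consecutive bitmap bytes),
   which is what the 0xff store above provides. */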
1184
1185 /*
1186 * Construct a 1024 element page directory with 4 MB pages for
1187 * the identity mapped page table used in real and protected mode
1188 * without paging with EPT.
1189 */
1190 pVM->hm.s.vmx.pNonPagingModeEPTPageTable = (PX86PD)((char *)pVM->hm.s.vmx.pRealModeTSS + PAGE_SIZE * 3);
1191 for (uint32_t i = 0; i < X86_PG_ENTRIES; i++)
1192 {
1193 pVM->hm.s.vmx.pNonPagingModeEPTPageTable->a[i].u = _4M * i;
1194 pVM->hm.s.vmx.pNonPagingModeEPTPageTable->a[i].u |= X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US
1195 | X86_PDE4M_A | X86_PDE4M_D | X86_PDE4M_PS
1196 | X86_PDE4M_G;
1197 }
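/* Each PDE i now identity-maps the physical range [i*4MB, (i+1)*4MB) with a
   present, writable, user-accessible 4 MB page, covering the full 4 GB. */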
1198
1199 /* We convert it here every time as PCI regions could be reconfigured. */
1200 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
1201 AssertRCReturn(rc, rc);
1202 LogRel(("HM: Real Mode TSS guest physaddr = %RGp\n", GCPhys));
1203
1204 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
1205 AssertRCReturn(rc, rc);
1206 LogRel(("HM: Non-Paging Mode EPT CR3 = %RGp\n", GCPhys));
1207 }
1208 else
1209 {
1210 /** @todo This cannot possibly work, there are other places which assume
1211 * this allocation cannot fail (see HMR3CanExecuteGuest()). Make this
1212 * a failure case. */
1213 LogRel(("HM: No real mode VT-x support (PDMR3VmmDevHeapAlloc returned %Rrc)\n", rc));
1214 pVM->hm.s.vmx.pRealModeTSS = NULL;
1215 pVM->hm.s.vmx.pNonPagingModeEPTPageTable = NULL;
1216 }
1217 }
1218
1219 /*
1220 * Call ring-0 to set up the VM.
1221 */
1222 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HM_SETUP_VM, 0, NULL);
1223 if (rc != VINF_SUCCESS)
1224 {
1225 AssertMsgFailed(("%Rrc\n", rc));
1226 LogRel(("HM: VMX setup failed with rc=%Rrc!\n", rc));
1227 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1228 LogRel(("HM: CPU[%ld] Last instruction error %x\n", i, pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError));
1229 return VMSetError(pVM, rc, RT_SRC_POS, "VT-x setup failed: %Rrc", rc);
1230 }
1231
1232 LogRel(("HM: VMX enabled!\n"));
1233 pVM->hm.s.vmx.fEnabled = true;
1234
1235 hmR3DisableRawMode(pVM); /** @todo make this go away! */
1236
1237 /*
1238 * Change the CPU features.
1239 */
1240 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
1241 if (pVM->hm.s.fAllow64BitGuests)
1242 {
1243 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
1244 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
1245 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL); /* 64 bits only on Intel CPUs */
1246 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
1247 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1248#if 0 /** @todo r=bird: This ain't making any sense whatsoever. */
1249#ifdef RT_ARCH_X86
1250 if ( !CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE)
1251 || !(pVM->hm.s.vmx.hostEFER & MSR_K6_EFER_NXE))
1252 LogRel(("NX is only supported for 64-bit guests!\n"));
1253#endif
1254#endif
1255 }
1256 /* Turn on NXE if PAE has been enabled *and* the host has turned on NXE
1257 (we reuse the host EFER in the switcher). */
1258 /** @todo this needs to be fixed properly!! */
1259 else if ( CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE)
1260 && (pVM->hm.s.vmx.hostEFER & MSR_K6_EFER_NXE))
1261 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1262 else
1263 LogRel(("HM: NX not supported by the host.\n"));
1264
1265 /*
1266 * Log configuration details.
1267 */
1268 LogRel((pVM->hm.s.fAllow64BitGuests
1269 ? "HM: Guest support: 32-bit and 64-bit.\n"
1270 : "HM: Guest support: 32-bit only.\n"));
1271 if (pVM->hm.s.fNestedPaging)
1272 {
1273 LogRel(("HM: Nested paging enabled!\n"));
1274 LogRel(("HM: EPT root page physaddr = %RHp\n", PGMGetHyperCR3(VMMGetCpu(pVM))));
1275 if (pVM->hm.s.vmx.enmFlushEpt == VMX_FLUSH_EPT_SINGLE_CONTEXT)
1276 LogRel(("HM: EPT flush type = VMX_FLUSH_EPT_SINGLE_CONTEXT\n"));
1277 else if (pVM->hm.s.vmx.enmFlushEpt == VMX_FLUSH_EPT_ALL_CONTEXTS)
1278 LogRel(("HM: EPT flush type = VMX_FLUSH_EPT_ALL_CONTEXTS\n"));
1279 else if (pVM->hm.s.vmx.enmFlushEpt == VMX_FLUSH_EPT_NOT_SUPPORTED)
1280 LogRel(("HM: EPT flush type = VMX_FLUSH_EPT_NOT_SUPPORTED\n"));
1281 else
1282 LogRel(("HM: EPT flush type = %d\n", pVM->hm.s.vmx.enmFlushEpt));
1283
1284 if (pVM->hm.s.vmx.fUnrestrictedGuest)
1285 LogRel(("HM: Unrestricted guest execution enabled!\n"));
1286
1287#if HC_ARCH_BITS == 64
1288 if (pVM->hm.s.fLargePages)
1289 {
1290 /* Use large (2 MB) pages for our EPT PDEs where possible. */
1291 PGMSetLargePageUsage(pVM, true);
1292 LogRel(("HM: Large page support enabled!\n"));
1293 }
1294#endif
1295 }
1296 else
1297 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest);
1298
1299 if (pVM->hm.s.vmx.fVpid)
1300 {
1301 LogRel(("HM: VPID enabled!\n"));
1302 if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_INDIV_ADDR)
1303 LogRel(("HM: VPID flush type = VMX_FLUSH_VPID_INDIV_ADDR\n"));
1304 else if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_SINGLE_CONTEXT)
1305 LogRel(("HM: VPID flush type = VMX_FLUSH_VPID_SINGLE_CONTEXT\n"));
1306 else if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_ALL_CONTEXTS)
1307 LogRel(("HM: VPID flush type = VMX_FLUSH_VPID_ALL_CONTEXTS\n"));
1308 else if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
1309 LogRel(("HM: VPID flush type = VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS\n"));
1310 else
1311 LogRel(("HM: VPID flush type = %d\n", pVM->hm.s.vmx.enmFlushVpid));
1312 }
1313 else if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_NOT_SUPPORTED)
1314 LogRel(("HM: Ignoring VPID capabilities of CPU.\n"));
1315
1316 /* TPR patching would never have worked on Intel. Leaving it here for the old
1317 * code's sake. See @bugref{6398}. */
1318#ifdef VBOX_WITH_OLD_VTX_CODE
1319 /*
1320 * TPR patching status logging.
1321 */
1322 if (pVM->hm.s.fTRPPatchingAllowed)
1323 {
1324 if ( (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1325 && (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
1326 {
1327 pVM->hm.s.fTRPPatchingAllowed = false; /* not necessary as we have a hardware solution. */
1328 LogRel(("HM: TPR Patching not required (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC).\n"));
1329 }
1330 else
1331 {
1332 uint32_t u32Eax, u32Dummy;
1333
1334 /* TPR patching needs access to the MSR_K8_LSTAR msr. */
1335 ASMCpuId(0x80000000, &u32Eax, &u32Dummy, &u32Dummy, &u32Dummy);
1336 if ( u32Eax < 0x80000001
1337 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
1338 {
1339 pVM->hm.s.fTRPPatchingAllowed = false;
1340 LogRel(("HM: TPR patching disabled (long mode not supported).\n"));
1341 }
1342 }
1343 }
1344 LogRel(("HM: TPR Patching %s.\n", (pVM->hm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));
1345#endif
1346
1347
1348 /*
1349 * Check for preemption timer config override and log the state of it.
1350 */
1351 if (pVM->hm.s.vmx.fUsePreemptTimer)
1352 {
1353 PCFGMNODE pCfgHm = CFGMR3GetChild(CFGMR3GetRoot(pVM), "HM");
1354 rc = CFGMR3QueryBoolDef(pCfgHm, "UsePreemptTimer", &pVM->hm.s.vmx.fUsePreemptTimer, true);
1355 AssertLogRelRCReturn(rc, rc);
1356 }
1357 if (pVM->hm.s.vmx.fUsePreemptTimer)
1358 LogRel(("HM: VMX-preemption timer enabled (cPreemptTimerShift=%u).\n", pVM->hm.s.vmx.cPreemptTimerShift));
1359 else
1360 LogRel(("HM: VMX-preemption timer disabled.\n"));
1361
1362 return VINF_SUCCESS;
1363}
1364
1365
1366/**
1367 * Finish AMD-V initialization (after ring-0 init).
1368 *
1369 * @returns VBox status code.
1370 * @param pVM The cross context VM structure.
1371 */
1372static int hmR3InitFinalizeR0Amd(PVM pVM)
1373{
1374 Log(("pVM->hm.s.svm.fSupported = %d\n", pVM->hm.s.svm.fSupported));
1375
1376#ifndef VBOX_WITH_OLD_AMDV_CODE
1377 LogRel(("HM: Using AMD-V implementation 2.0!\n"));
1378#endif
1379
1380 uint32_t u32Family;
1381 uint32_t u32Model;
1382 uint32_t u32Stepping;
1383 if (HMAmdIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))
1384 LogRel(("HM: AMD Cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
1385 LogRel(("HM: CPUID 0x80000001.u32AMDFeatureECX = %RX32\n", pVM->hm.s.cpuid.u32AMDFeatureECX));
1386 LogRel(("HM: CPUID 0x80000001.u32AMDFeatureEDX = %RX32\n", pVM->hm.s.cpuid.u32AMDFeatureEDX));
1387 LogRel(("HM: AMD HWCR MSR = %RX64\n", pVM->hm.s.svm.msrHwcr));
1388 LogRel(("HM: AMD-V revision = %X\n", pVM->hm.s.svm.u32Rev));
1389 LogRel(("HM: AMD-V max ASID = %RU32\n", pVM->hm.s.uMaxAsid));
1390 LogRel(("HM: AMD-V features = %X\n", pVM->hm.s.svm.u32Features));
1391
1392 /*
1393 * Enumerate AMD-V features.
1394 */
1395 static const struct { uint32_t fFlag; const char *pszName; } s_aSvmFeatures[] =
1396 {
1397#define HMSVM_REPORT_FEATURE(a_Define) { a_Define, #a_Define }
1398 HMSVM_REPORT_FEATURE(AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
1399 HMSVM_REPORT_FEATURE(AMD_CPUID_SVM_FEATURE_EDX_LBR_VIRT),
1400 HMSVM_REPORT_FEATURE(AMD_CPUID_SVM_FEATURE_EDX_SVM_LOCK),
1401 HMSVM_REPORT_FEATURE(AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE),
1402 HMSVM_REPORT_FEATURE(AMD_CPUID_SVM_FEATURE_EDX_TSC_RATE_MSR),
1403 HMSVM_REPORT_FEATURE(AMD_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN),
1404 HMSVM_REPORT_FEATURE(AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID),
1405 HMSVM_REPORT_FEATURE(AMD_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST),
1406 HMSVM_REPORT_FEATURE(AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER),
1407 HMSVM_REPORT_FEATURE(AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD),
1408 HMSVM_REPORT_FEATURE(AMD_CPUID_SVM_FEATURE_EDX_AVIC),
1409#undef HMSVM_REPORT_FEATURE
1410 };
1411
1412 uint32_t fSvmFeatures = pVM->hm.s.svm.u32Features;
1413 for (unsigned i = 0; i < RT_ELEMENTS(s_aSvmFeatures); i++)
1414 if (fSvmFeatures & s_aSvmFeatures[i].fFlag)
1415 {
1416 LogRel(("HM: %s\n", s_aSvmFeatures[i].pszName));
1417 fSvmFeatures &= ~s_aSvmFeatures[i].fFlag;
1418 }
1419 if (fSvmFeatures)
1420 for (unsigned iBit = 0; iBit < 32; iBit++)
1421 if (RT_BIT_32(iBit) & fSvmFeatures)
1422 LogRel(("HM: Reserved bit %u\n", iBit));
1423
1424 /*
1425 * Adjust feature(s).
1426 */
1427 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
1428 pVM->hm.s.fNestedPaging = pVM->hm.s.fAllowNestedPaging;
1429
1430 /*
1431 * Call ring-0 to set up the VM.
1432 */
1433 int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HM_SETUP_VM, 0, NULL);
1434 if (rc != VINF_SUCCESS)
1435 {
1436 AssertMsgFailed(("%Rrc\n", rc));
1437 LogRel(("HM: AMD-V setup failed with rc=%Rrc!\n", rc));
1438 return VMSetError(pVM, rc, RT_SRC_POS, "AMD-V setup failed: %Rrc", rc);
1439 }
1440
1441 LogRel(("HM: AMD-V enabled!\n"));
1442 pVM->hm.s.svm.fEnabled = true;
1443
1444 if (pVM->hm.s.fNestedPaging)
1445 {
1446 LogRel(("HM: Nested paging enabled!\n"));
1447
1448 /*
1449 * Enable large pages (2 MB) if applicable.
1450 */
1451#if HC_ARCH_BITS == 64
1452 if (pVM->hm.s.fLargePages)
1453 {
1454 PGMSetLargePageUsage(pVM, true);
1455 LogRel(("HM: Large page support enabled!\n"));
1456 }
1457#endif
1458 }
1459
1460 hmR3DisableRawMode(pVM);
1461
1462 /*
1463 * Change the CPU features.
1464 */
1465 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
1466 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);
1467 if (pVM->hm.s.fAllow64BitGuests)
1468 {
1469 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
1470 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
1471 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1472 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
1473 }
1474 /* Turn on NXE if PAE has been enabled. */
1475 else if (CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
1476 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1477
1478 LogRel(("HM: TPR Patching %s.\n", (pVM->hm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));
1479
1480 LogRel((pVM->hm.s.fAllow64BitGuests
1481 ? "HM: Guest support: 32-bit and 64-bit.\n"
1482 : "HM: Guest support: 32-bit only.\n"));
1483
1484 return VINF_SUCCESS;
1485}
1486
1487
1488/**
1489 * Applies relocations to data and code managed by this
1490 * component. This function will be called at init and
1491 * whenever the VMM needs to relocate itself inside the GC.
1492 *
1493 * @param pVM The VM.
1494 */
1495VMMR3_INT_DECL(void) HMR3Relocate(PVM pVM)
1496{
1497 Log(("HMR3Relocate to %RGv\n", MMHyperGetArea(pVM, 0)));
1498
1499 /* When relocating during state loading, fetch the current paging mode. */
1500 if (VMR3GetState(pVM) == VMSTATE_LOADING)
1501 {
1502 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1503 {
1504 PVMCPU pVCpu = &pVM->aCpus[i];
1505
1506 pVCpu->hm.s.enmShadowMode = PGMGetShadowMode(pVCpu);
1507#ifdef VBOX_WITH_OLD_VTX_CODE
1508 Assert(pVCpu->hm.s.vmx.enmCurrGuestMode == PGMGetGuestMode(pVCpu));
1509 pVCpu->hm.s.vmx.enmCurrGuestMode = PGMGetGuestMode(pVCpu);
1510#endif
1511 }
1512 }
1513#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1514 if (HMIsEnabled(pVM))
1515 {
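/* A 32-bit host running 64-bit guests must switch the CPU into long mode
   itself; pick the host-to-guest world switcher matching the host paging
   mode (legacy 32-bit vs. PAE). */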
1516 switch (PGMGetHostMode(pVM))
1517 {
1518 case PGMMODE_32_BIT:
1519 pVM->hm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_32_TO_AMD64);
1520 break;
1521
1522 case PGMMODE_PAE:
1523 case PGMMODE_PAE_NX:
1524 pVM->hm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_PAE_TO_AMD64);
1525 break;
1526
1527 default:
1528 AssertFailed();
1529 break;
1530 }
1531 }
1532#endif
1533 return;
1534}
1535
1536
1537/**
1538 * Notification callback which is called whenever there is a chance that a CR3
1539 * value might have changed.
1540 *
1541 * This is called by PGM.
1542 *
1543 * @param pVM Pointer to the VM.
1544 * @param pVCpu Pointer to the VMCPU.
1545 * @param enmShadowMode New shadow paging mode.
1546 * @param enmGuestMode New guest paging mode.
1547 */
1548VMMR3_INT_DECL(void) HMR3PagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode)
1549{
1550 /* Ignore page mode changes during state loading. */
1551 if (VMR3GetState(pVCpu->pVMR3) == VMSTATE_LOADING)
1552 return;
1553
1554 pVCpu->hm.s.enmShadowMode = enmShadowMode;
1555
1556#ifdef VBOX_WITH_OLD_VTX_CODE
1557 if ( pVM->hm.s.vmx.fEnabled
1558 && HMIsEnabled(pVM))
1559 {
1560 if ( pVCpu->hm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
1561 && enmGuestMode >= PGMMODE_PROTECTED)
1562 {
1563 PCPUMCTX pCtx;
1564
1565 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1566
1567 /* After a real mode switch to protected mode we must force
1568 CPL to 0. Our real mode emulation had to set it to 3. */
1569 pCtx->ss.Attr.n.u2Dpl = 0;
1570 }
1571 }
1572
1573 if (pVCpu->hm.s.vmx.enmCurrGuestMode != enmGuestMode)
1574 {
1575 /* Keep track of paging mode changes. */
1576 pVCpu->hm.s.vmx.enmPrevGuestMode = pVCpu->hm.s.vmx.enmCurrGuestMode;
1577 pVCpu->hm.s.vmx.enmCurrGuestMode = enmGuestMode;
1578
1579 /* Did we miss a change, because all code was executed in the recompiler? */
1580 if (pVCpu->hm.s.vmx.enmLastSeenGuestMode == enmGuestMode)
1581 {
1582 Log(("HMR3PagingModeChanged missed %s->%s transition (prev %s)\n", PGMGetModeName(pVCpu->hm.s.vmx.enmPrevGuestMode),
1583 PGMGetModeName(pVCpu->hm.s.vmx.enmCurrGuestMode), PGMGetModeName(pVCpu->hm.s.vmx.enmLastSeenGuestMode)));
1584 pVCpu->hm.s.vmx.enmLastSeenGuestMode = pVCpu->hm.s.vmx.enmPrevGuestMode;
1585 }
1586 }
1587#else
1588 /* If the guest left protected mode VMX execution, we'll have to be extra
1589 * careful if/when the guest switches back to protected mode.
1590 */
1591 if (enmGuestMode == PGMMODE_REAL)
1592 {
1593 Log(("HMR3PagingModeChanged indicates real mode execution\n"));
1594 pVCpu->hm.s.vmx.fWasInRealMode = true;
1595 }
1596#endif
1597
1598 /** @todo r=ramshankar: Why do we need to do this? */
1599#ifdef VMX_USE_CACHED_VMCS_ACCESSES
1600 /* Reset the contents of the read cache. */
1601 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
1602 for (unsigned j = 0; j < pCache->Read.cValidEntries; j++)
1603 pCache->Read.aFieldVal[j] = 0;
1604#endif
1605}
1606
1607
1608/**
1609 * Terminates the HM.
1610 *
1611 * Termination means cleaning up and freeing all resources;
1612 * the VM itself is, at this point, powered off or suspended.
1613 *
1614 * @returns VBox status code.
1615 * @param pVM Pointer to the VM.
1616 */
1617VMMR3_INT_DECL(int) HMR3Term(PVM pVM)
1618{
1619 if (pVM->hm.s.vmx.pRealModeTSS)
1620 {
1621 PDMR3VmmDevHeapFree(pVM, pVM->hm.s.vmx.pRealModeTSS);
1622 pVM->hm.s.vmx.pRealModeTSS = 0;
1623 }
1624 hmR3TermCPU(pVM);
1625 return 0;
1626}
1627
1628
1629/**
1630 * Terminates the per-VCPU HM.
1631 *
1632 * @returns VBox status code.
1633 * @param pVM Pointer to the VM.
1634 */
1635static int hmR3TermCPU(PVM pVM)
1636{
1637 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1638 {
1639 PVMCPU pVCpu = &pVM->aCpus[i]; NOREF(pVCpu);
1640
1641#ifdef VBOX_WITH_STATISTICS
1642 if (pVCpu->hm.s.paStatExitReason)
1643 {
1644 MMHyperFree(pVM, pVCpu->hm.s.paStatExitReason);
1645 pVCpu->hm.s.paStatExitReason = NULL;
1646 pVCpu->hm.s.paStatExitReasonR0 = NIL_RTR0PTR;
1647 }
1648 if (pVCpu->hm.s.paStatInjectedIrqs)
1649 {
1650 MMHyperFree(pVM, pVCpu->hm.s.paStatInjectedIrqs);
1651 pVCpu->hm.s.paStatInjectedIrqs = NULL;
1652 pVCpu->hm.s.paStatInjectedIrqsR0 = NIL_RTR0PTR;
1653 }
1654#endif
1655
1656#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1657 memset(pVCpu->hm.s.vmx.VMCSCache.aMagic, 0, sizeof(pVCpu->hm.s.vmx.VMCSCache.aMagic));
1658 pVCpu->hm.s.vmx.VMCSCache.uMagic = 0;
1659 pVCpu->hm.s.vmx.VMCSCache.uPos = 0xffffffff;
1660#endif
1661 }
1662 return 0;
1663}
1664
1665
1666/**
1667 * Resets a virtual CPU.
1668 *
1669 * Used by HMR3Reset and CPU hot plugging.
1670 *
1671 * @param pVCpu The CPU to reset.
1672 */
1673VMMR3_INT_DECL(void) HMR3ResetCpu(PVMCPU pVCpu)
1674{
1675 /* On first entry we'll sync everything. */
1676 pVCpu->hm.s.fContextUseFlags = (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST);
1677
1678 pVCpu->hm.s.vmx.u32CR0Mask = 0;
1679 pVCpu->hm.s.vmx.u32CR4Mask = 0;
1680
1681 pVCpu->hm.s.fActive = false;
1682 pVCpu->hm.s.Event.fPending = false;
1683
1684#ifdef VBOX_WITH_OLD_VTX_CODE
1685 /* Reset state information for real-mode emulation in VT-x. */
1686 pVCpu->hm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL;
1687 pVCpu->hm.s.vmx.enmPrevGuestMode = PGMMODE_REAL;
1688 pVCpu->hm.s.vmx.enmCurrGuestMode = PGMMODE_REAL;
1689#else
1690 pVCpu->hm.s.vmx.fWasInRealMode = true;
1691#endif
1692
1693 /* Reset the contents of the read cache. */
1694 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
1695 for (unsigned j = 0; j < pCache->Read.cValidEntries; j++)
1696 pCache->Read.aFieldVal[j] = 0;
1697
1698#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1699 /* Magic marker for searching in crash dumps. */
1700 strcpy((char *)pCache->aMagic, "VMCSCACHE Magic");
1701 pCache->uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
1702#endif
1703}
1704
1705
1706/**
1707 * The VM is being reset.
1708 *
1709 * For the HM component this means that any GDT/LDT/TSS monitors
1710 * need to be removed.
1711 *
1712 * @param pVM Pointer to the VM.
1713 */
1714VMMR3_INT_DECL(void) HMR3Reset(PVM pVM)
1715{
1716 LogFlow(("HMR3Reset:\n"));
1717
1718 if (HMIsEnabled(pVM))
1719 hmR3DisableRawMode(pVM);
1720
1721 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1722 {
1723 PVMCPU pVCpu = &pVM->aCpus[i];
1724
1725 HMR3ResetCpu(pVCpu);
1726 }
1727
1728 /* Clear all patch information. */
1729 pVM->hm.s.pGuestPatchMem = 0;
1730 pVM->hm.s.pFreeGuestPatchMem = 0;
1731 pVM->hm.s.cbGuestPatchMem = 0;
1732 pVM->hm.s.cPatches = 0;
1733 pVM->hm.s.PatchTree = 0;
1734 pVM->hm.s.fTPRPatchingActive = false;
1735 ASMMemZero32(pVM->hm.s.aPatches, sizeof(pVM->hm.s.aPatches));
1736}
1737
1738
1739/**
1740 * Callback to remove TPR patches, restoring the original instructions.
1741 *
1742 * @returns VBox strict status code.
1743 * @param pVM Pointer to the VM.
1744 * @param pVCpu The VMCPU for the EMT we're being called on.
1745 * @param pvUser The VCPU ID (cast via uintptr_t) of the EMT that issued the patch request.
1746 */
1747DECLCALLBACK(VBOXSTRICTRC) hmR3RemovePatches(PVM pVM, PVMCPU pVCpu, void *pvUser)
1748{
1749 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
1750
1751 /* Only execute the handler on the VCPU on which the original patch request was issued. */
1752 if (pVCpu->idCpu != idCpu)
1753 return VINF_SUCCESS;
1754
1755 Log(("hmR3RemovePatches\n"));
1756 for (unsigned i = 0; i < pVM->hm.s.cPatches; i++)
1757 {
1758 uint8_t abInstr[15];
1759 PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
1760 RTGCPTR pInstrGC = (RTGCPTR)pPatch->Core.Key;
1761 int rc;
1762
1763#ifdef LOG_ENABLED
1764 char szOutput[256];
1765
1766 rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
1767 szOutput, sizeof(szOutput), NULL);
1768 if (RT_SUCCESS(rc))
1769 Log(("Patched instr: %s\n", szOutput));
1770#endif
1771
1772 /* Check if the instruction is still the same. */
1773 rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pInstrGC, pPatch->cbNewOp);
1774 if (rc != VINF_SUCCESS)
1775 {
1776 Log(("Patched code removed? (rc=%Rrc0\n", rc));
1777 continue; /* swapped out or otherwise removed; skip it. */
1778 }
1779
1780 if (memcmp(abInstr, pPatch->aNewOpcode, pPatch->cbNewOp))
1781 {
1782 Log(("Patched instruction was changed! (rc=%Rrc0\n", rc));
1783 continue; /* skip it. */
1784 }
1785
1786 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pInstrGC, pPatch->aOpcode, pPatch->cbOp);
1787 AssertRC(rc);
1788
1789#ifdef LOG_ENABLED
1790 rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
1791 szOutput, sizeof(szOutput), NULL);
1792 if (RT_SUCCESS(rc))
1793 Log(("Original instr: %s\n", szOutput));
1794#endif
1795 }
1796 pVM->hm.s.cPatches = 0;
1797 pVM->hm.s.PatchTree = 0;
1798 pVM->hm.s.pFreeGuestPatchMem = pVM->hm.s.pGuestPatchMem;
1799 pVM->hm.s.fTPRPatchingActive = false;
1800 return VINF_SUCCESS;
1801}
1802
1803
1804/**
1805 * Worker for enabling patching in a VT-x/AMD-V guest.
1806 *
1807 * @returns VBox status code.
1808 * @param pVM Pointer to the VM.
1809 * @param idCpu VCPU to execute hmR3RemovePatches on.
1810 * @param pPatchMem Patch memory range.
1811 * @param cbPatchMem Size of the memory range.
1812 */
1813static int hmR3EnablePatching(PVM pVM, VMCPUID idCpu, RTRCPTR pPatchMem, unsigned cbPatchMem)
1814{
1815 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hmR3RemovePatches, (void *)(uintptr_t)idCpu);
1816 AssertRC(rc);
1817
1818 pVM->hm.s.pGuestPatchMem = pPatchMem;
1819 pVM->hm.s.pFreeGuestPatchMem = pPatchMem;
1820 pVM->hm.s.cbGuestPatchMem = cbPatchMem;
1821 return VINF_SUCCESS;
1822}
1823
1824
1825/**
1826 * Enable patching in a VT-x/AMD-V guest.
1827 *
1828 * @returns VBox status code.
1829 * @param pVM Pointer to the VM.
1830 * @param pPatchMem Patch memory range.
1831 * @param cbPatchMem Size of the memory range.
1832 */
1833VMMR3_INT_DECL(int) HMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1834{
1835 VM_ASSERT_EMT(pVM);
1836 Log(("HMR3EnablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
1837 if (pVM->cCpus > 1)
1838 {
1839 /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
1840 int rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE,
1841 (PFNRT)hmR3EnablePatching, 4, pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
1842 AssertRC(rc);
1843 return rc;
1844 }
1845 return hmR3EnablePatching(pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
1846}
1847
1848
1849/**
1850 * Disable patching in a VT-x/AMD-V guest.
1851 *
1852 * @returns VBox status code.
1853 * @param pVM Pointer to the VM.
1854 * @param pPatchMem Patch memory range.
1855 * @param cbPatchMem Size of the memory range.
1856 */
1857VMMR3_INT_DECL(int) HMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1858{
1859 Log(("HMR3DisablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
1860
1861 Assert(pVM->hm.s.pGuestPatchMem == pPatchMem);
1862 Assert(pVM->hm.s.cbGuestPatchMem == cbPatchMem);
1863
1864 /** @todo Potential deadlock when other VCPUs are waiting on the IOM lock (we own it)!! */
1865 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hmR3RemovePatches,
1866 (void *)(uintptr_t)VMMGetCpuId(pVM));
1867 AssertRC(rc);
1868
1869 pVM->hm.s.pGuestPatchMem = 0;
1870 pVM->hm.s.pFreeGuestPatchMem = 0;
1871 pVM->hm.s.cbGuestPatchMem = 0;
1872 pVM->hm.s.fTPRPatchingActive = false;
1873 return VINF_SUCCESS;
1874}
1875
1876
1877/**
1878 * Callback to patch a TPR instruction (vmmcall or mov cr8).
1879 *
1880 * @returns VBox strict status code.
1881 * @param pVM Pointer to the VM.
1882 * @param pVCpu The VMCPU for the EMT we're being called on.
1883 * @param pvUser The VCPU ID (cast via uintptr_t) of the EMT that issued the patch request.
1884 *
1885 */
1886DECLCALLBACK(VBOXSTRICTRC) hmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
1887{
1888 /*
1889 * Only execute the handler on the VCPU on which the original patch request
1890 * was issued. (The other CPU(s) might not yet have switched to protected
1891 * mode, nor have the correct memory context.)
1892 */
1893 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
1894 if (pVCpu->idCpu != idCpu)
1895 return VINF_SUCCESS;
1896
1897 /*
1898 * We're racing other VCPUs here, so don't try to patch the instruction twice
1899 * and make sure there is still room for our patch record.
1900 */
1901 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1902 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
1903 if (pPatch)
1904 {
1905 Log(("hmR3ReplaceTprInstr: already patched %RGv\n", pCtx->rip));
1906 return VINF_SUCCESS;
1907 }
1908 uint32_t const idx = pVM->hm.s.cPatches;
1909 if (idx >= RT_ELEMENTS(pVM->hm.s.aPatches))
1910 {
1911 Log(("hmR3ReplaceTprInstr: no available patch slots (%RGv)\n", pCtx->rip));
1912 return VINF_SUCCESS;
1913 }
1914 pPatch = &pVM->hm.s.aPatches[idx];
1915
1916 Log(("hmR3ReplaceTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));
1917
1918 /*
1919 * Disassemble the instruction and get cracking.
1920 */
1921 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "hmR3ReplaceTprInstr");
1922 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
1923 uint32_t cbOp;
1924 int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
1925 AssertRC(rc);
1926 if ( rc == VINF_SUCCESS
1927 && pDis->pCurInstr->uOpcode == OP_MOV
1928 && cbOp >= 3)
1929 {
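/* 0F 01 D9 is the VMMCALL instruction; the patched guest code thus raises
   a hypercall intercept which the VMM handles as a TPR access. */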
1930 static uint8_t const s_abVMMCall[3] = { 0x0f, 0x01, 0xd9 };
1931
1932 rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
1933 AssertRC(rc);
1934
1935 pPatch->cbOp = cbOp;
1936
1937 if (pDis->Param1.fUse == DISUSE_DISPLACEMENT32)
1938 {
1939 /* write. */
1940 if (pDis->Param2.fUse == DISUSE_REG_GEN32)
1941 {
1942 pPatch->enmType = HMTPRINSTR_WRITE_REG;
1943 pPatch->uSrcOperand = pDis->Param2.Base.idxGenReg;
1944 Log(("hmR3ReplaceTprInstr: HMTPRINSTR_WRITE_REG %u\n", pDis->Param2.Base.idxGenReg));
1945 }
1946 else
1947 {
1948 Assert(pDis->Param2.fUse == DISUSE_IMMEDIATE32);
1949 pPatch->enmType = HMTPRINSTR_WRITE_IMM;
1950 pPatch->uSrcOperand = pDis->Param2.uValue;
1951 Log(("hmR3ReplaceTprInstr: HMTPRINSTR_WRITE_IMM %#llx\n", pDis->Param2.uValue));
1952 }
1953 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, s_abVMMCall, sizeof(s_abVMMCall));
1954 AssertRC(rc);
1955
1956 memcpy(pPatch->aNewOpcode, s_abVMMCall, sizeof(s_abVMMCall));
1957 pPatch->cbNewOp = sizeof(s_abVMMCall);
1958 }
1959 else
1960 {
1961 /*
1962 * TPR Read.
1963 *
1964 * Found:
1965 * mov eax, dword [fffe0080] (5 bytes)
1966 * Check if next instruction is:
1967 * shr eax, 4
1968 */
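/* 0xFFFE0080 is the TPR register (APIC offset 0x80) at the guest-virtual
   address where 32-bit Windows typically maps the local APIC; the following
   SHR by 4 extracts the task-priority class from TPR bits 7:4. */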
1969 Assert(pDis->Param1.fUse == DISUSE_REG_GEN32);
1970
1971 uint8_t const idxMmioReg = pDis->Param1.Base.idxGenReg;
1972 uint8_t const cbOpMmio = cbOp;
1973 uint64_t const uSavedRip = pCtx->rip;
1974
1975 pCtx->rip += cbOp;
1976 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
1977 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Following read");
1978 pCtx->rip = uSavedRip;
1979
1980 if ( rc == VINF_SUCCESS
1981 && pDis->pCurInstr->uOpcode == OP_SHR
1982 && pDis->Param1.fUse == DISUSE_REG_GEN32
1983 && pDis->Param1.Base.idxGenReg == idxMmioReg
1984 && pDis->Param2.fUse == DISUSE_IMMEDIATE8
1985 && pDis->Param2.uValue == 4
1986 && cbOpMmio + cbOp < sizeof(pVM->hm.s.aPatches[idx].aOpcode))
1987 {
1988 uint8_t abInstr[15];
1989
1990 /* Replace the two instructions above with an AMD-V specific lock-prefixed 32-bit MOV CR8 instruction, so that
1991 CR8 can be accessed in 32-bit mode without causing a #VMEXIT. */
1992 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pPatch->aOpcode, pCtx->rip, cbOpMmio + cbOp);
1993 AssertRC(rc);
1994
1995 pPatch->cbOp = cbOpMmio + cbOp;
1996
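/* On AMD CPUs a LOCK prefix on "mov reg, cr0" (F0 0F 20 /r) is an alternate
   encoding for accessing CR8 from 32-bit code; with AMD-V's virtual TPR this
   reads the TPR directly, without causing a #VMEXIT. */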
1997 /* 0xF0, 0x0F, 0x20, 0xC0 = mov eax, cr8 */
1998 abInstr[0] = 0xF0;
1999 abInstr[1] = 0x0F;
2000 abInstr[2] = 0x20;
2001 abInstr[3] = 0xC0 | pDis->Param1.Base.idxGenReg;
2002 for (unsigned i = 4; i < pPatch->cbOp; i++)
2003 abInstr[i] = 0x90; /* nop */
2004
2005 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, abInstr, pPatch->cbOp);
2006 AssertRC(rc);
2007
2008 memcpy(pPatch->aNewOpcode, abInstr, pPatch->cbOp);
2009 pPatch->cbNewOp = pPatch->cbOp;
2010
2011 Log(("Acceptable read/shr candidate!\n"));
2012 pPatch->enmType = HMTPRINSTR_READ_SHR4;
2013 }
2014 else
2015 {
2016 pPatch->enmType = HMTPRINSTR_READ;
2017 pPatch->uDstOperand = idxMmioReg;
2018
2019 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, s_abVMMCall, sizeof(s_abVMMCall));
2020 AssertRC(rc);
2021
2022 memcpy(pPatch->aNewOpcode, s_abVMMCall, sizeof(s_abVMMCall));
2023 pPatch->cbNewOp = sizeof(s_abVMMCall);
2024 Log(("hmR3ReplaceTprInstr: HMTPRINSTR_READ %u\n", pPatch->uDstOperand));
2025 }
2026 }
2027
2028 pPatch->Core.Key = pCtx->eip;
2029 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2030 AssertRC(rc);
2031
2032 pVM->hm.s.cPatches++;
2033 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceSuccess);
2034 return VINF_SUCCESS;
2035 }
2036
2037 /*
2038 * Save invalid patch, so we will not try again.
2039 */
2040 Log(("hmR3ReplaceTprInstr: Failed to patch instr!\n"));
2041 pPatch->Core.Key = pCtx->eip;
2042 pPatch->enmType = HMTPRINSTR_INVALID;
2043 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2044 AssertRC(rc);
2045 pVM->hm.s.cPatches++;
2046 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceFailure);
2047 return VINF_SUCCESS;
2048}
2049
2050
2051/**
2052 * Callback to patch a TPR instruction (jump to generated code).
2053 *
2054 * @returns VBox strict status code.
2055 * @param pVM Pointer to the VM.
2056 * @param pVCpu The VMCPU for the EMT we're being called on.
2057 * @param pvUser The VCPU ID (cast via uintptr_t) of the EMT that issued the patch request.
2058 *
2059 */
2060DECLCALLBACK(VBOXSTRICTRC) hmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
2061{
2062 /*
2063 * Only execute the handler on the VCPU on which the original patch request
2064 * was issued. (The other CPU(s) might not yet have switched to protected
2065 * mode, nor have the correct memory context.)
2066 */
2067 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
2068 if (pVCpu->idCpu != idCpu)
2069 return VINF_SUCCESS;
2070
2071 /*
2072 * We're racing other VCPUs here, so don't try to patch the instruction twice
2073 * and make sure there is still room for our patch record.
2074 */
2075 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
2076 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
2077 if (pPatch)
2078 {
2079 Log(("hmR3PatchTprInstr: already patched %RGv\n", pCtx->rip));
2080 return VINF_SUCCESS;
2081 }
2082 uint32_t const idx = pVM->hm.s.cPatches;
2083 if (idx >= RT_ELEMENTS(pVM->hm.s.aPatches))
2084 {
2085 Log(("hmR3PatchTprInstr: no available patch slots (%RGv)\n", pCtx->rip));
2086 return VINF_SUCCESS;
2087 }
2088 pPatch = &pVM->hm.s.aPatches[idx];
2089
2090 Log(("hmR3PatchTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));
2091 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "hmR3PatchTprInstr");
2092
2093 /*
2094 * Disassemble the instruction and get cracking.
2095 */
2096 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
2097 uint32_t cbOp;
2098 int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
2099 AssertRC(rc);
2100 if ( rc == VINF_SUCCESS
2101 && pDis->pCurInstr->uOpcode == OP_MOV
2102 && cbOp >= 5)
2103 {
2104 uint8_t aPatch[64];
2105 uint32_t off = 0;
2106
2107 rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
2108 AssertRC(rc);
2109
2110 pPatch->cbOp = cbOp;
2111 pPatch->enmType = HMTPRINSTR_JUMP_REPLACEMENT;
2112
2113 if (pDis->Param1.fUse == DISUSE_DISPLACEMENT32)
2114 {
2115 /*
2116 * TPR write:
2117 *
2118 * push ECX [51]
2119 * push EDX [52]
2120 * push EAX [50]
2121 * xor EDX,EDX [31 D2]
2122 * mov EAX,EAX [89 C0]
2123 * or
2124 * mov EAX,0000000CCh [B8 CC 00 00 00]
2125 * mov ECX,0C0000082h [B9 82 00 00 C0]
2126 * wrmsr [0F 30]
2127 * pop EAX [58]
2128 * pop EDX [5A]
2129 * pop ECX [59]
2130 * jmp return_address [E9 return_address]
2131 *
2132 */
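/* MSR_K8_LSTAR (0xC0000082) is not otherwise used by 32-bit guests, which
   makes it a convenient TPR proxy: the WRMSR causes an intercept that is
   handled as a TPR write with trivial decoding. */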
2133 bool fUsesEax = (pDis->Param2.fUse == DISUSE_REG_GEN32 && pDis->Param2.Base.idxGenReg == DISGREG_EAX);
2134
2135 aPatch[off++] = 0x51; /* push ecx */
2136 aPatch[off++] = 0x52; /* push edx */
2137 if (!fUsesEax)
2138 aPatch[off++] = 0x50; /* push eax */
2139 aPatch[off++] = 0x31; /* xor edx, edx */
2140 aPatch[off++] = 0xD2;
2141 if (pDis->Param2.fUse == DISUSE_REG_GEN32)
2142 {
2143 if (!fUsesEax)
2144 {
2145 aPatch[off++] = 0x89; /* mov eax, src_reg */
2146 aPatch[off++] = MAKE_MODRM(3, pDis->Param2.Base.idxGenReg, DISGREG_EAX);
2147 }
2148 }
2149 else
2150 {
2151 Assert(pDis->Param2.fUse == DISUSE_IMMEDIATE32);
2152 aPatch[off++] = 0xB8; /* mov eax, immediate */
2153 *(uint32_t *)&aPatch[off] = pDis->Param2.uValue;
2154 off += sizeof(uint32_t);
2155 }
2156 aPatch[off++] = 0xB9; /* mov ecx, 0xc0000082 */
2157 *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
2158 off += sizeof(uint32_t);
2159
2160 aPatch[off++] = 0x0F; /* wrmsr */
2161 aPatch[off++] = 0x30;
2162 if (!fUsesEax)
2163 aPatch[off++] = 0x58; /* pop eax */
2164 aPatch[off++] = 0x5A; /* pop edx */
2165 aPatch[off++] = 0x59; /* pop ecx */
2166 }
2167 else
2168 {
2169 /*
2170 * TPR read:
2171 *
2172 * push ECX [51]
2173 * push EDX [52]
2174 * push EAX [50]
* xor EDX,EDX [31 D2]
2175 * mov ECX,0C0000082h [B9 82 00 00 C0]
2176 * rdmsr [0F 32]
2177 * mov EAX,EAX [89 C0]
2178 * pop EAX [58]
2179 * pop EDX [5A]
2180 * pop ECX [59]
2181 * jmp return_address [E9 return_address]
2182 *
2183 */
2184 Assert(pDis->Param1.fUse == DISUSE_REG_GEN32);
2185
2186 if (pDis->Param1.Base.idxGenReg != DISGREG_ECX)
2187 aPatch[off++] = 0x51; /* push ecx */
2188 if (pDis->Param1.Base.idxGenReg != DISGREG_EDX )
2189 aPatch[off++] = 0x52; /* push edx */
2190 if (pDis->Param1.Base.idxGenReg != DISGREG_EAX)
2191 aPatch[off++] = 0x50; /* push eax */
2192
2193 aPatch[off++] = 0x31; /* xor edx, edx */
2194 aPatch[off++] = 0xD2;
2195
2196 aPatch[off++] = 0xB9; /* mov ecx, 0xc0000082 */
2197 *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
2198 off += sizeof(uint32_t);
2199
2200 aPatch[off++] = 0x0F; /* rdmsr */
2201 aPatch[off++] = 0x32;
2202
2203 if (pDis->Param1.Base.idxGenReg != DISGREG_EAX)
2204 {
2205 aPatch[off++] = 0x89; /* mov dst_reg, eax */
2206 aPatch[off++] = MAKE_MODRM(3, DISGREG_EAX, pDis->Param1.Base.idxGenReg);
2207 }
2208
2209 if (pDis->Param1.Base.idxGenReg != DISGREG_EAX)
2210 aPatch[off++] = 0x58; /* pop eax */
2211 if (pDis->Param1.Base.idxGenReg != DISGREG_EDX )
2212 aPatch[off++] = 0x5A; /* pop edx */
2213 if (pDis->Param1.Base.idxGenReg != DISGREG_ECX)
2214 aPatch[off++] = 0x59; /* pop ecx */
2215 }
2216 aPatch[off++] = 0xE9; /* jmp return_address */
2217 *(RTRCUINTPTR *)&aPatch[off] = ((RTRCUINTPTR)pCtx->eip + cbOp) - ((RTRCUINTPTR)pVM->hm.s.pFreeGuestPatchMem + off + 4);
2218 off += sizeof(RTRCUINTPTR);
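/* The E9 displacement is relative to the end of the jmp instruction:
   rel32 = return address (original EIP + instruction length) minus the
   address of the byte following the 4-byte displacement. */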
2219
2220 if (pVM->hm.s.pFreeGuestPatchMem + off <= pVM->hm.s.pGuestPatchMem + pVM->hm.s.cbGuestPatchMem)
2221 {
2222 /* Write new code to the patch buffer. */
2223 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pVM->hm.s.pFreeGuestPatchMem, aPatch, off);
2224 AssertRC(rc);
2225
2226#ifdef LOG_ENABLED
2227 uint32_t cbCurInstr;
2228 for (RTGCPTR GCPtrInstr = pVM->hm.s.pFreeGuestPatchMem;
2229 GCPtrInstr < pVM->hm.s.pFreeGuestPatchMem + off;
2230 GCPtrInstr += RT_MAX(cbCurInstr, 1))
2231 {
2232 char szOutput[256];
2233 rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, GCPtrInstr, DBGF_DISAS_FLAGS_DEFAULT_MODE,
2234 szOutput, sizeof(szOutput), &cbCurInstr);
2235 if (RT_SUCCESS(rc))
2236 Log(("Patch instr %s\n", szOutput));
2237 else
2238 Log(("%RGv: rc=%Rrc\n", GCPtrInstr, rc));
2239 }
2240#endif
2241
2242 pPatch->aNewOpcode[0] = 0xE9;
2243 *(RTRCUINTPTR *)&pPatch->aNewOpcode[1] = ((RTRCUINTPTR)pVM->hm.s.pFreeGuestPatchMem) - ((RTRCUINTPTR)pCtx->eip + 5);
2244
2245 /* Overwrite the TPR instruction with a jump. */
2246 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->eip, pPatch->aNewOpcode, 5);
2247 AssertRC(rc);
2248
2249 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Jump");
2250
2251 pVM->hm.s.pFreeGuestPatchMem += off;
2252 pPatch->cbNewOp = 5;
2253
2254 pPatch->Core.Key = pCtx->eip;
2255 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2256 AssertRC(rc);
2257
2258 pVM->hm.s.cPatches++;
2259 pVM->hm.s.fTPRPatchingActive = true;
2260 STAM_COUNTER_INC(&pVM->hm.s.StatTprPatchSuccess);
2261 return VINF_SUCCESS;
2262 }
2263
2264 Log(("Ran out of space in our patch buffer!\n"));
2265 }
2266 else
2267 Log(("hmR3PatchTprInstr: Failed to patch instr!\n"));
2268
2269
2270 /*
2271 * Save invalid patch, so we will not try again.
2272 */
2273 pPatch = &pVM->hm.s.aPatches[idx];
2274 pPatch->Core.Key = pCtx->eip;
2275 pPatch->enmType = HMTPRINSTR_INVALID;
2276 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2277 AssertRC(rc);
2278 pVM->hm.s.cPatches++;
2279 STAM_COUNTER_INC(&pVM->hm.s.StatTprPatchFailure);
2280 return VINF_SUCCESS;
2281}
2282
2283
2284/**
2285 * Attempt to patch TPR mmio instructions.
2286 *
2287 * @returns VBox status code.
2288 * @param pVM Pointer to the VM.
2289 * @param pVCpu Pointer to the VMCPU.
2290 * @param pCtx Pointer to the guest CPU context.
2291 */
2292VMMR3_INT_DECL(int) HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2293{
2294 NOREF(pCtx);
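/* If a guest patch buffer has been registered, emit jump-based patches into
   it; otherwise fall back to replacing the instruction in place. The
   one-by-one rendezvous runs the patching on a single EMT at a time. */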
2295 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE,
2296 pVM->hm.s.pGuestPatchMem ? hmR3PatchTprInstr : hmR3ReplaceTprInstr,
2297 (void *)(uintptr_t)pVCpu->idCpu);
2298 AssertRC(rc);
2299 return rc;
2300}
2301
2302
2303/**
2304 * Checks if a code selector (CS) is suitable for execution
2305 * within VMX when unrestricted execution isn't available.
2306 *
2307 * @returns true if selector is suitable for VMX, otherwise
2308 * false.
2309 * @param pSel Pointer to the selector to check (CS).
2310 * @param uStackDpl The DPL of the stack segment.
2311 */
2312static bool hmR3IsCodeSelectorOkForVmx(PCPUMSELREG pSel, unsigned uStackDpl)
2313{
2314 bool rc = false;
2315
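/* These checks mirror the VM-entry guest segment checks described in the
   Intel spec ("Checks on Guest Segment Registers"); failing any of them
   means VT-x cannot execute this state without unrestricted guest execution. */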
2316 do
2317 {
2318 /* Segment must be accessed. */
2319 if (!(pSel->Attr.u & X86_SEL_TYPE_ACCESSED))
2320 break;
2321 /* Segment must be a code segment. */
2322 if (!(pSel->Attr.u & X86_SEL_TYPE_CODE))
2323 break;
2324 /* The S bit must be set. */
2325 if (!pSel->Attr.n.u1DescType)
2326 break;
2327 if (pSel->Attr.n.u4Type & X86_SEL_TYPE_CONF)
2328 {
2329 /* For conforming segments, CS.DPL must be <= SS.DPL. */
2330 if (pSel->Attr.n.u2Dpl > uStackDpl)
2331 break;
2332 }
2333 else
2334 {
2335 /* For non-conforming segments, CS.DPL must equal SS.DPL. */
2336 if (pSel->Attr.n.u2Dpl != uStackDpl)
2337 break;
2338 }
2339 /* Segment must be present. */
2340 if (!pSel->Attr.n.u1Present)
2341 break;
2342 /* G bit must be set if any high limit bits are set. */
2343 if ((pSel->u32Limit & 0xfff00000) && !pSel->Attr.n.u1Granularity)
2344 break;
2345 /* G bit must be clear if any low limit bits are clear. */
2346 if ((pSel->u32Limit & 0x0fff) != 0x0fff && pSel->Attr.n.u1Granularity)
2347 break;
2348
2349 rc = true;
2350 } while (0);
2351 return rc;
2352}
2353
2354
2355/**
2356 * Checks if a data selector (DS/ES/FS/GS) is suitable for
2357 * execution within VMX when unrestricted execution isn't
2358 * available.
2359 *
2360 * @returns true if selector is suitable for VMX, otherwise
2361 * false.
2362 * @param pSel Pointer to the selector to check
2363 * (DS/ES/FS/GS).
2364 */
2365static bool hmR3IsDataSelectorOkForVmx(PCPUMSELREG pSel)
2366{
2367 bool rc = false;
2368
2369 /* If attributes are all zero, consider the segment unusable and therefore OK.
2370 * This logic must be in sync with HMVMXR0.cpp!
2371 */
2372 if (!pSel->Attr.u)
2373 return true;
2374
2375 do
2376 {
2377 /* Segment must be accessed. */
2378 if (!(pSel->Attr.u & X86_SEL_TYPE_ACCESSED))
2379 break;
2380 /* Code segments must also be readable. */
2381 if (pSel->Attr.u & X86_SEL_TYPE_CODE && !(pSel->Attr.u & X86_SEL_TYPE_READ))
2382 break;
2383 /* The S bit must be set. */
2384 if (!pSel->Attr.n.u1DescType)
2385 break;
2386 /* Except for conforming segments, DPL >= RPL. */
2387 if (pSel->Attr.n.u4Type <= X86_SEL_TYPE_ER_ACC && pSel->Attr.n.u2Dpl < (pSel->Sel & X86_SEL_RPL))
2388 break;
2389 /* Segment must be present. */
2390 if (!pSel->Attr.n.u1Present)
2391 break;
2392 /* G bit must be set if any high limit bits are set. */
2393 if ((pSel->u32Limit & 0xfff00000) && !pSel->Attr.n.u1Granularity)
2394 break;
2395 /* G bit must be clear if any low limit bits are clear. */
2396 if ((pSel->u32Limit & 0x0fff) != 0x0fff && pSel->Attr.n.u1Granularity)
2397 break;
2398
2399 rc = true;
2400 } while (0);
2401 return rc;
2402}
2403
2404
2405/**
2406 * Checks if the stack selector (SS) is suitable for execution
2407 * within VMX when unrestricted execution isn't available.
2408 *
2409 * @returns true if selector is suitable for VMX, otherwise
2410 * false.
2411 * @param pSel Pointer to the selector to check (SS).
2412 */
2413static bool hmR3IsStackSelectorOkForVmx(PCPUMSELREG pSel)
2414{
2415 bool rc = false;
2416
2417 /* If attributes are all zero, consider the segment unusable and therefore OK.
2418 * This logic must be in sync with HMVMXR0.cpp!
2419 */
2420 if (!pSel->Attr.u)
2421 return true;
2422
2423 do
2424 {
2425 /* Segment must be accessed. */
2426 if (!(pSel->Attr.u & X86_SEL_TYPE_ACCESSED))
2427 break;
2428 /* Segment must be writable. */
2429 if (!(pSel->Attr.u & X86_SEL_TYPE_WRITE))
2430 break;
2431 /* Segment must not be a code segment. */
2432 if (pSel->Attr.u & X86_SEL_TYPE_CODE)
2433 break;
2434 /* The S bit must be set. */
2435 if (!pSel->Attr.n.u1DescType)
2436 break;
2437 /* DPL must equal RPL. */
2438 if (pSel->Attr.n.u2Dpl != (pSel->Sel & X86_SEL_RPL))
2439 break;
2440 /* Segment must be present. */
2441 if (!pSel->Attr.n.u1Present)
2442 break;
2443 /* G bit must be set if any high limit bits are set. */
2444 if ((pSel->u32Limit & 0xfff00000) && !pSel->Attr.n.u1Granularity)
2445 break;
2446 /* G bit must be clear if any low limit bits are clear. */
2447 if ((pSel->u32Limit & 0x0fff) != 0x0fff && pSel->Attr.n.u1Granularity)
2448 break;
2449
2450 rc = true;
2451 } while (0);
2452 return rc;
2453}
2454
2455
2456/**
2457 * Force execution of the current IO code in the recompiler.
2458 *
2459 * @returns VBox status code.
2460 * @param pVM Pointer to the VM.
2461 * @param pCtx Partial VM execution context.
2462 */
2463VMMR3_INT_DECL(int) HMR3EmulateIoBlock(PVM pVM, PCPUMCTX pCtx)
2464{
2465 PVMCPU pVCpu = VMMGetCpu(pVM);
2466
2467 Assert(HMIsEnabled(pVM));
2468 Log(("HMR3EmulateIoBlock\n"));
2469
2470 /* This is primarily intended to speed up Grub, so we don't care about paged protected mode. */
2471 if (HMCanEmulateIoBlockEx(pCtx))
2472 {
2473 Log(("HMR3EmulateIoBlock -> enabled\n"));
2474 pVCpu->hm.s.EmulateIoBlock.fEnabled = true;
2475 pVCpu->hm.s.EmulateIoBlock.GCPtrFunctionEip = pCtx->rip;
2476 pVCpu->hm.s.EmulateIoBlock.cr0 = pCtx->cr0;
2477 return VINF_EM_RESCHEDULE_REM;
2478 }
2479 return VINF_SUCCESS;
2480}
2481
2482
2483/**
2484 * Checks if we can currently use hardware accelerated raw mode.
2485 *
2486 * @returns true if we can currently use hardware acceleration, otherwise false.
2487 * @param pVM Pointer to the VM.
2488 * @param pCtx Partial VM execution context.
2489 */
2490VMMR3DECL(bool) HMR3CanExecuteGuest(PVM pVM, PCPUMCTX pCtx)
2491{
2492 PVMCPU pVCpu = VMMGetCpu(pVM);
2493
2494 Assert(HMIsEnabled(pVM));
2495
2496 /* If we're still executing the IO code (within 512 bytes of where the block was entered and with CR0 unchanged), then return false. */
2497 if ( RT_UNLIKELY(pVCpu->hm.s.EmulateIoBlock.fEnabled)
2498 && pCtx->rip < pVCpu->hm.s.EmulateIoBlock.GCPtrFunctionEip + 0x200
2499 && pCtx->rip > pVCpu->hm.s.EmulateIoBlock.GCPtrFunctionEip - 0x200
2500 && pCtx->cr0 == pVCpu->hm.s.EmulateIoBlock.cr0)
2501 return false;
2502
2503 pVCpu->hm.s.EmulateIoBlock.fEnabled = false;
2504
2505 /* AMD-V supports real & protected mode with or without paging. */
2506 if (pVM->hm.s.svm.fEnabled)
2507 {
2508 pVCpu->hm.s.fActive = true;
2509 return true;
2510 }
2511
2512 pVCpu->hm.s.fActive = false;
2513
2514 /* Note! The context supplied by REM is partial. If we add more checks here, be sure to verify that REM provides this info! */
2515 Assert( (pVM->hm.s.vmx.fUnrestrictedGuest && !pVM->hm.s.vmx.pRealModeTSS)
2516 || (!pVM->hm.s.vmx.fUnrestrictedGuest && pVM->hm.s.vmx.pRealModeTSS));
2517
2518 bool fSupportsRealMode = pVM->hm.s.vmx.fUnrestrictedGuest || PDMVmmDevHeapIsEnabled(pVM);
2519 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
2520 {
2521 /*
2522 * The VMM device heap is a requirement for emulating real mode or protected mode without paging when the unrestricted
2523 * guest execution feature is missing (VT-x only).
2524 */
2525 if (fSupportsRealMode)
2526 {
2527 if (CPUMIsGuestInRealModeEx(pCtx))
2528 {
2529 /* In V86 mode (VT-x or not), the CPU enforces real-mode compatible selector
2530 * bases and limits, i.e. limit must be 64K and base must be selector * 16.
2531 * If this is not true, we cannot execute real mode as V86 and have to fall
2532 * back to emulation.
2533 */
2534 if ( pCtx->cs.Sel != (pCtx->cs.u64Base >> 4)
2535 || pCtx->ds.Sel != (pCtx->ds.u64Base >> 4)
2536 || pCtx->es.Sel != (pCtx->es.u64Base >> 4)
2537 || pCtx->ss.Sel != (pCtx->ss.u64Base >> 4)
2538 || pCtx->fs.Sel != (pCtx->fs.u64Base >> 4)
2539 || pCtx->gs.Sel != (pCtx->gs.u64Base >> 4))
2540 {
2541 STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelBase);
2542 return false;
2543 }
2544 if ( (pCtx->cs.u32Limit != 0xffff)
2545 || (pCtx->ds.u32Limit != 0xffff)
2546 || (pCtx->es.u32Limit != 0xffff)
2547 || (pCtx->ss.u32Limit != 0xffff)
2548 || (pCtx->fs.u32Limit != 0xffff)
2549 || (pCtx->gs.u32Limit != 0xffff))
2550 {
2551 STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelLimit);
2552 return false;
2553 }
2554 STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckRmOk);
2555 }
2556 else
2557 {
2558 PGMMODE enmGuestMode = PGMGetGuestMode(pVCpu);
2559 /* Verify the requirements for executing code in protected
2560 mode. VT-x can't handle the CPU state right after a switch
2561 from real to protected mode. (all sorts of RPL & DPL assumptions) */
2562#ifdef VBOX_WITH_OLD_VTX_CODE
2563 if ( pVCpu->hm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
2564 && enmGuestMode >= PGMMODE_PROTECTED)
2565#else
2566 if (pVCpu->hm.s.vmx.fWasInRealMode)
2567#endif
2568 {
2569 /** @todo If the guest is in V86 mode, these checks should be different! */
2570#ifdef VBOX_WITH_OLD_VTX_CODE
2571 if ( (pCtx->cs.Sel & X86_SEL_RPL)
2572 || (pCtx->ds.Sel & X86_SEL_RPL)
2573 || (pCtx->es.Sel & X86_SEL_RPL)
2574 || (pCtx->fs.Sel & X86_SEL_RPL)
2575 || (pCtx->gs.Sel & X86_SEL_RPL)
2576 || (pCtx->ss.Sel & X86_SEL_RPL))
2577 {
2578 STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadSel);
2579 return false;
2580 }
2581#else
2582 if ((pCtx->cs.Sel & X86_SEL_RPL) != (pCtx->ss.Sel & X86_SEL_RPL))
2583 {
2584 STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRpl);
2585 return false;
2586 }
2587 if ( !hmR3IsCodeSelectorOkForVmx(&pCtx->cs, pCtx->ss.Attr.n.u2Dpl)
2588 || !hmR3IsDataSelectorOkForVmx(&pCtx->ds)
2589 || !hmR3IsDataSelectorOkForVmx(&pCtx->es)
2590 || !hmR3IsDataSelectorOkForVmx(&pCtx->fs)
2591 || !hmR3IsDataSelectorOkForVmx(&pCtx->gs)
2592 || !hmR3IsStackSelectorOkForVmx(&pCtx->ss))
2593 {
2594 STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadSel);
2595 return false;
2596 }
2597#endif
2598 }
2599 /* VT-x also chokes on invalid TR or LDTR selectors (seen with Minix). */
2600 if (pCtx->gdtr.cbGdt)
2601 {
2602 if (pCtx->tr.Sel > pCtx->gdtr.cbGdt)
2603 {
2604 STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadTr);
2605 return false;
2606 }
2607 else if (pCtx->ldtr.Sel > pCtx->gdtr.cbGdt)
2608 {
2609 STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadLdt);
2610 return false;
2611 }
2612 }
2613 STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckPmOk);
2614 }
2615 }
2616 else
2617 {
2618 if ( !CPUMIsGuestInLongModeEx(pCtx)
2619 && !pVM->hm.s.vmx.fUnrestrictedGuest)
2620 {
2621#ifdef VBOX_WITH_OLD_VTX_CODE
2622 /** @todo This should (probably) be set on every excursion to the REM,
2623 * however it's too risky right now. So, only apply it when we go
2624 * back to REM for real mode execution. (The XP hack below doesn't
2625 * work reliably without this.)
2626 * Update: Implemented in EM.cpp, see #ifdef EM_NOTIFY_HM. */
2627 for (uint32_t i = 0; i < pVM->cCpus; i++)
2628 pVM->aCpus[i].hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
2629#endif
2630
2631 if ( !pVM->hm.s.fNestedPaging /* requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap */
2632 || CPUMIsGuestInRealModeEx(pCtx)) /* requires a fake TSS for real mode - stored in the VMM device heap */
2633 return false;
2634
2635 /* Too early for VT-x; Solaris guests will fail with a guru meditation otherwise; same for XP. */
2636 if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr.Sel == 0)
2637 return false;
2638
2639 /* The guest is about to complete the switch to protected mode. Wait a bit longer. */
2640 /* Windows XP; switch to protected mode; all selectors are marked not present in the
2641 * hidden registers (possible recompiler bug; see load_seg_vm) */
2642 if (pCtx->cs.Attr.n.u1Present == 0)
2643 return false;
2644 if (pCtx->ss.Attr.n.u1Present == 0)
2645 return false;
2646
2647 /* Windows XP: possibly the same as above, but the new recompiler requires new heuristics?
2648 VT-x doesn't seem to like something about the guest state and this stuff avoids it. */
2649 /** @todo This check is actually wrong, it doesn't take the direction of the
2650 * stack segment into account. But, it does the job for now. */
2651 if (pCtx->rsp >= pCtx->ss.u32Limit)
2652 return false;
2653#if 0
2654 if ( pCtx->cs.Sel >= pCtx->gdtr.cbGdt
2655 || pCtx->ss.Sel >= pCtx->gdtr.cbGdt
2656 || pCtx->ds.Sel >= pCtx->gdtr.cbGdt
2657 || pCtx->es.Sel >= pCtx->gdtr.cbGdt
2658 || pCtx->fs.Sel >= pCtx->gdtr.cbGdt
2659 || pCtx->gs.Sel >= pCtx->gdtr.cbGdt)
2660 return false;
2661#endif
2662 }
2663 }
2664 }
2665
2666 if (pVM->hm.s.vmx.fEnabled)
2667 {
2668 uint32_t mask;
2669
2670 /* if bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
2671 mask = (uint32_t)pVM->hm.s.vmx.msr.vmx_cr0_fixed0;
2672 /* Note: We ignore the NE bit here on purpose; see vmmr0\hmr0.cpp for details. */
2673 mask &= ~X86_CR0_NE;
2674
2675 if (fSupportsRealMode)
2676 {
2677 /* Note: We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
2678 mask &= ~(X86_CR0_PG|X86_CR0_PE);
2679 }
2680 else
2681 {
2682 /* We support protected mode without paging using identity mapping. */
2683 mask &= ~X86_CR0_PG;
2684 }
2685 if ((pCtx->cr0 & mask) != mask)
2686 return false;
2687
2688 /* if bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
2689 mask = (uint32_t)~pVM->hm.s.vmx.msr.vmx_cr0_fixed1;
2690 if ((pCtx->cr0 & mask) != 0)
2691 return false;
2692
2693 /* if bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
2694 mask = (uint32_t)pVM->hm.s.vmx.msr.vmx_cr4_fixed0;
2695 mask &= ~X86_CR4_VMXE;
2696 if ((pCtx->cr4 & mask) != mask)
2697 return false;
2698
2699 /* if bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
2700 mask = (uint32_t)~pVM->hm.s.vmx.msr.vmx_cr4_fixed1;
2701 if ((pCtx->cr4 & mask) != 0)
2702 return false;
2703
2704 pVCpu->hm.s.fActive = true;
2705 return true;
2706 }
2707
2708 return false;
2709}
2710
2711
2712/**
2713 * Checks if we need to reschedule due to VMM device heap changes.
2714 *
2715 * @returns true if a reschedule is required, otherwise false.
2716 * @param pVM Pointer to the VM.
2717 * @param pCtx VM execution context.
2718 */
2719VMMR3_INT_DECL(bool) HMR3IsRescheduleRequired(PVM pVM, PCPUMCTX pCtx)
2720{
2721 /*
2722 * The VMM device heap is a requirement for emulating real mode or protected mode without paging
2723 * when the unrestricted guest execution feature is missing (VT-x only).
2724 */
2725#ifdef VBOX_WITH_OLD_VTX_CODE
2726 if ( pVM->hm.s.vmx.fEnabled
2727 && !pVM->hm.s.vmx.fUnrestrictedGuest
2728 && !CPUMIsGuestInPagedProtectedModeEx(pCtx)
2729 && !PDMVmmDevHeapIsEnabled(pVM)
2730 && (pVM->hm.s.fNestedPaging || CPUMIsGuestInRealModeEx(pCtx)))
2731 return true;
2732#else
2733 if ( pVM->hm.s.vmx.fEnabled
2734 && !pVM->hm.s.vmx.fUnrestrictedGuest
2735 && CPUMIsGuestInRealModeEx(pCtx)
2736 && !PDMVmmDevHeapIsEnabled(pVM))
2737 return true;
2738#endif
2739
2740 return false;
2741}
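
/*
 * A caller-side sketch of how this predicate is typically consumed; the helper
 * below is hypothetical (the real callers live in EM) and is compiled out.
 */
#if 0
static bool emR3ExampleUseHwAccel(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    if (HMR3IsRescheduleRequired(pVM, pCtx))
        return false;           /* VMM device heap prerequisites changed; (re)enter REM for now. */
    return HMR3IsActive(pVCpu); /* Otherwise stick with whatever engine is currently active. */
}
#endif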
2742
2743
2744/**
2745 * Notification from EM about a rescheduling into hardware assisted execution
2746 * mode.
2747 *
2748 * @param pVCpu Pointer to the current VMCPU.
2749 */
2750VMMR3_INT_DECL(void) HMR3NotifyScheduled(PVMCPU pVCpu)
2751{
2752 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
2753}
2754
2755
2756/**
2757 * Notification from EM about returning from instruction emulation (REM / EM).
2758 *
2759 * @param pVCpu Pointer to the VMCPU.
2760 */
2761VMMR3_INT_DECL(void) HMR3NotifyEmulated(PVMCPU pVCpu)
2762{
2763 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
2764}
2765
2766
2767/**
2768 * Checks if we are currently using hardware accelerated raw mode.
2769 *
2770 * @returns true if hardware acceleration is being used, otherwise false.
2771 * @param pVCpu Pointer to the VMCPU.
2772 */
2773VMMR3_INT_DECL(bool) HMR3IsActive(PVMCPU pVCpu)
2774{
2775 return pVCpu->hm.s.fActive;
2776}
2777
2778
2779/**
2780 * External interface for querying whether hardware accelerated raw mode is
2781 * enabled.
2782 *
2783 * @returns true if hardware accelerated raw mode is enabled, otherwise false.
2784 * @param pUVM The user mode VM handle.
2785 * @sa HMIsEnabled, HMIsEnabledNotMacro.
2786 */
2787VMMR3DECL(bool) HMR3IsEnabled(PUVM pUVM)
2788{
2789 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2790 PVM pVM = pUVM->pVM;
2791 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2792 return pVM->fHMEnabled; /* Don't use the macro as the GUI may query us very, very early. */
2793}
2794
2795
2796/**
2797 * Checks if we are currently using nested paging.
2798 *
2799 * @returns true if nested paging is being used, otherwise false.
2800 * @param pUVM The user mode VM handle.
2801 */
2802VMMR3DECL(bool) HMR3IsNestedPagingActive(PUVM pUVM)
2803{
2804 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2805 PVM pVM = pUVM->pVM;
2806 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2807 return pVM->hm.s.fNestedPaging;
2808}
2809
2810
2811/**
2812 * Checks if we are currently using VPID in VT-x mode.
2813 *
2814 * @returns true if VPID is being used, otherwise false.
2815 * @param pUVM The user mode VM handle.
2816 */
2817VMMR3DECL(bool) HMR3IsVpidActive(PUVM pUVM)
2818{
2819 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2820 PVM pVM = pUVM->pVM;
2821 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2822 return pVM->hm.s.vmx.fVpid;
2823}
2824
2825
2826/**
2827 * Checks if we are currently using VT-x unrestricted execution,
2828 * aka UX.
2829 *
2830 * @returns true if UX is being used, otherwise false.
2831 * @param pUVM The user mode VM handle.
2832 */
2833VMMR3DECL(bool) HMR3IsUXActive(PUVM pUVM)
2834{
2835 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2836 PVM pVM = pUVM->pVM;
2837 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2838 return pVM->hm.s.vmx.fUnrestrictedGuest;
2839}
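
/*
 * The PUVM-based queries above are safe to call very early from external
 * clients. A sketch of a frontend-style status dump using them; the function
 * name is hypothetical and the block is compiled out.
 */
#if 0
static void exampleLogHmStatus(PUVM pUVM)
{
    LogRel(("HM: enabled=%RTbool nested-paging=%RTbool vpid=%RTbool unrestricted-guest=%RTbool\n",
            HMR3IsEnabled(pUVM), HMR3IsNestedPagingActive(pUVM),
            HMR3IsVpidActive(pUVM), HMR3IsUXActive(pUVM)));
}
#endif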
2840
2841
2842/**
2843 * Checks if internal events are pending. In that case we are not allowed to dispatch interrupts.
2844 *
2845 * @returns true if an internal event is pending, otherwise false.
2846 * @param pVCpu Pointer to the VMCPU.
2847 */
2848VMMR3_INT_DECL(bool) HMR3IsEventPending(PVMCPU pVCpu)
2849{
2850 return HMIsEnabled(pVCpu->pVMR3) && pVCpu->hm.s.Event.fPending;
2851}
2852
2853
2854/**
2855 * Checks if the VMX-preemption timer is being used.
2856 *
2857 * @returns true if the VMX-preemption timer is being used, otherwise false.
2858 * @param pVM Pointer to the VM.
2859 */
2860VMMR3_INT_DECL(bool) HMR3IsVmxPreemptionTimerUsed(PVM pVM)
2861{
2862 return HMIsEnabled(pVM)
2863 && pVM->hm.s.vmx.fEnabled
2864 && pVM->hm.s.vmx.fUsePreemptTimer;
2865}
2866
2867
2868/**
2869 * Restarts an I/O instruction that was refused in ring-0.
2870 *
2871 * @returns Strict VBox status code. Informational status codes other than those documented
2872 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2873 * @retval VINF_SUCCESS Success.
2874 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2875 * status code must be passed on to EM.
2876 * @retval VERR_NOT_FOUND if no pending I/O instruction.
2877 *
2878 * @param pVM Pointer to the VM.
2879 * @param pVCpu Pointer to the VMCPU.
2880 * @param pCtx Pointer to the guest CPU context.
2881 */
2882VMMR3_INT_DECL(VBOXSTRICTRC) HMR3RestartPendingIOInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2883{
2884 HMPENDINGIO enmType = pVCpu->hm.s.PendingIO.enmType;
2885
2886 pVCpu->hm.s.PendingIO.enmType = HMPENDINGIO_INVALID;
2887
2888 if ( pVCpu->hm.s.PendingIO.GCPtrRip != pCtx->rip
2889 || enmType == HMPENDINGIO_INVALID)
2890 return VERR_NOT_FOUND;
2891
2892 VBOXSTRICTRC rcStrict;
2893 switch (enmType)
2894 {
2895 case HMPENDINGIO_PORT_READ:
2896 {
2897 uint32_t uAndVal = pVCpu->hm.s.PendingIO.s.Port.uAndVal;
2898 uint32_t u32Val = 0;
2899
2900 rcStrict = IOMIOPortRead(pVM, pVCpu, pVCpu->hm.s.PendingIO.s.Port.uPort,
2901 &u32Val,
2902 pVCpu->hm.s.PendingIO.s.Port.cbSize);
2903 if (IOM_SUCCESS(rcStrict))
2904 {
2905 /* Write back to the EAX register. */
2906 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
2907 pCtx->rip = pVCpu->hm.s.PendingIO.GCPtrRipNext;
2908 }
2909 break;
2910 }
2911
2912 case HMPENDINGIO_PORT_WRITE:
2913 rcStrict = IOMIOPortWrite(pVM, pVCpu, pVCpu->hm.s.PendingIO.s.Port.uPort,
2914 pCtx->eax & pVCpu->hm.s.PendingIO.s.Port.uAndVal,
2915 pVCpu->hm.s.PendingIO.s.Port.cbSize);
2916 if (IOM_SUCCESS(rcStrict))
2917 pCtx->rip = pVCpu->hm.s.PendingIO.GCPtrRipNext;
2918 break;
2919
2920 default:
2921 AssertLogRelFailedReturn(VERR_HM_UNKNOWN_IO_INSTRUCTION);
2922 }
2923
2924 return rcStrict;
2925}
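
/*
 * Expected call pattern from the ring-3 execution loop after returning from
 * ring-0: retry the refused instruction and pass informational status codes
 * on to EM. The wrapper below is a hypothetical sketch and is compiled out.
 */
#if 0
static int emR3ExampleRetryPendingIo(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    VBOXSTRICTRC rcStrict = HMR3RestartPendingIOInstr(pVM, pVCpu, pCtx);
    if (rcStrict == VERR_NOT_FOUND)
        return VINF_SUCCESS;            /* Nothing was pending; nothing to do. */
    return VBOXSTRICTRC_TODO(rcStrict); /* IOM_SUCCESS() codes and failures alike go to EM. */
}
#endif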
2926
2927
2928/**
2929 * Checks for a fatal VT-x/AMD-V error and produces a meaningful
2930 * release log message.
2931 *
2932 * @param pVM Pointer to the VM.
2933 * @param iStatusCode VBox status code.
2934 */
2935VMMR3_INT_DECL(void) HMR3CheckError(PVM pVM, int iStatusCode)
2936{
2937 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2938 {
2939 switch (iStatusCode)
2940 {
2941 case VERR_VMX_INVALID_VMCS_FIELD:
2942 break;
2943
2944 case VERR_VMX_INVALID_VMCS_PTR:
2945 LogRel(("HM: VERR_VMX_INVALID_VMCS_PTR:\n"));
2946 LogRel(("HM: CPU%d Current pointer %RGp vs %RGp\n", i, pVM->aCpus[i].hm.s.vmx.LastError.u64VMCSPhys, pVM->aCpus[i].hm.s.vmx.HCPhysVmcs));
2947 LogRel(("HM: CPU%d Current VMCS version %x\n", i, pVM->aCpus[i].hm.s.vmx.LastError.u32VMCSRevision));
2948 LogRel(("HM: CPU%d Entered Cpu %d\n", i, pVM->aCpus[i].hm.s.vmx.LastError.idEnteredCpu));
2949 LogRel(("HM: CPU%d Current Cpu %d\n", i, pVM->aCpus[i].hm.s.vmx.LastError.idCurrentCpu));
2950 break;
2951
2952 case VERR_VMX_UNABLE_TO_START_VM:
2953 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM:\n"));
2954 LogRel(("HM: CPU%d Instruction error %#x\n", i, pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError));
2955 LogRel(("HM: CPU%d Exit reason %#x\n", i, pVM->aCpus[i].hm.s.vmx.LastError.u32ExitReason));
2956 if (pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS)
2957 {
2958 LogRel(("HM: CPU%d PinCtls %#RX32\n", i, pVM->aCpus[i].hm.s.vmx.u32PinCtls));
2959 LogRel(("HM: CPU%d ProcCtls %#RX32\n", i, pVM->aCpus[i].hm.s.vmx.u32ProcCtls));
2960 LogRel(("HM: CPU%d ProcCtls2 %#RX32\n", i, pVM->aCpus[i].hm.s.vmx.u32ProcCtls2));
2961 LogRel(("HM: CPU%d EntryCtls %#RX32\n", i, pVM->aCpus[i].hm.s.vmx.u32EntryCtls));
2962 LogRel(("HM: CPU%d ExitCtls %#RX32\n", i, pVM->aCpus[i].hm.s.vmx.u32ExitCtls));
2963 LogRel(("HM: CPU%d MSRBitmapPhys %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMsrBitmap));
2964#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
2965 LogRel(("HM: CPU%d GuestMSRPhys %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysGuestMsr));
2966 LogRel(("HM: CPU%d HostMsrPhys %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysHostMsr));
2967 LogRel(("HM: CPU%d cGuestMSRs %u\n", i, pVM->aCpus[i].hm.s.vmx.cGuestMsrs));
2968#endif
2969 }
2970 /** @todo Log VM-entry event injection control fields
2971 * VMX_VMCS_CTRL_ENTRY_IRQ_INFO, VMX_VMCS_CTRL_ENTRY_EXCEPTION_ERRCODE
2972 * and VMX_VMCS_CTRL_ENTRY_INSTR_LENGTH from the VMCS. */
2973 break;
2974
2975 case VERR_VMX_INVALID_VMXON_PTR:
2976 break;
2977
2978 case VERR_VMX_UNEXPECTED_EXIT_CODE:
2979 case VERR_SVM_UNKNOWN_EXIT:
2980 case VERR_SVM_UNEXPECTED_EXIT:
2981 case VERR_SVM_UNEXPECTED_PATCH_TYPE:
2982 case VERR_SVM_UNEXPECTED_XCPT_EXIT:
2983 LogRel(("HM: CPU%d HM error %#x\n", i, pVM->aCpus[i].hm.s.u32HMError));
2984 break;
2985 }
2986 }
2987
2988 if (iStatusCode == VERR_VMX_UNABLE_TO_START_VM)
2989 {
2990 LogRel(("VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed %x\n", pVM->hm.s.vmx.msr.vmx_entry.n.allowed1));
2991 LogRel(("VERR_VMX_UNABLE_TO_START_VM: VM-entry disallowed %x\n", pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0));
2992 }
2993}
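
/*
 * Typical use: a fatal status from the ring-0 world switch is run through this
 * function so the per-CPU diagnostics end up in the release log before the VM
 * guru-meditates. Illustrative snippet only (compiled out):
 */
#if 0
int rc = VERR_VMX_UNABLE_TO_START_VM;   /* e.g. what the ring-0 VM-entry path returned */
HMR3CheckError(pVM, rc);
/* ...followed by the caller's VMSetError / guru meditation handling. */
#endif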
2994
2995
2996/**
2997 * Execute state save operation.
2998 *
2999 * @returns VBox status code.
3000 * @param pVM Pointer to the VM.
3001 * @param pSSM SSM operation handle.
3002 */
3003static DECLCALLBACK(int) hmR3Save(PVM pVM, PSSMHANDLE pSSM)
3004{
3005 int rc;
3006
3007 Log(("hmR3Save:\n"));
3008
3009 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3010 {
3011 /*
3012 * Save the basic bits - fortunately all the other things can be resynced on load.
3013 */
3014 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.Event.fPending);
3015 AssertRCReturn(rc, rc);
3016 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.Event.u32ErrCode);
3017 AssertRCReturn(rc, rc);
3018 rc = SSMR3PutU64(pSSM, pVM->aCpus[i].hm.s.Event.u64IntrInfo);
3019 AssertRCReturn(rc, rc);
3020
3021#ifdef VBOX_WITH_OLD_VTX_CODE
3022 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.vmx.enmLastSeenGuestMode);
3023 AssertRCReturn(rc, rc);
3024 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.vmx.enmCurrGuestMode);
3025 AssertRCReturn(rc, rc);
3026 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.vmx.enmPrevGuestMode);
3027 AssertRCReturn(rc, rc);
3028#else
3029 //@todo: We only need to save pVM->aCpus[i].hm.s.vmx.fWasInRealMode and
3030 // perhaps not even that (the initial value of 'true' is safe).
3031 uint32_t u32Dummy = PGMMODE_REAL;
3032 rc = SSMR3PutU32(pSSM, u32Dummy);
3033 AssertRCReturn(rc, rc);
3034 rc = SSMR3PutU32(pSSM, u32Dummy);
3035 AssertRCReturn(rc, rc);
3036 rc = SSMR3PutU32(pSSM, u32Dummy);
3037 AssertRCReturn(rc, rc);
3038#endif
3039 }
3040#ifdef VBOX_HM_WITH_GUEST_PATCHING
3041 rc = SSMR3PutGCPtr(pSSM, pVM->hm.s.pGuestPatchMem);
3042 AssertRCReturn(rc, rc);
3043 rc = SSMR3PutGCPtr(pSSM, pVM->hm.s.pFreeGuestPatchMem);
3044 AssertRCReturn(rc, rc);
3045 rc = SSMR3PutU32(pSSM, pVM->hm.s.cbGuestPatchMem);
3046 AssertRCReturn(rc, rc);
3047
3048 /* Store all the guest patch records too. */
3049 rc = SSMR3PutU32(pSSM, pVM->hm.s.cPatches);
3050 AssertRCReturn(rc, rc);
3051
3052 for (unsigned i = 0; i < pVM->hm.s.cPatches; i++)
3053 {
3054 PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
3055
3056 rc = SSMR3PutU32(pSSM, pPatch->Core.Key);
3057 AssertRCReturn(rc, rc);
3058
3059 rc = SSMR3PutMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
3060 AssertRCReturn(rc, rc);
3061
3062 rc = SSMR3PutU32(pSSM, pPatch->cbOp);
3063 AssertRCReturn(rc, rc);
3064
3065 rc = SSMR3PutMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
3066 AssertRCReturn(rc, rc);
3067
3068 rc = SSMR3PutU32(pSSM, pPatch->cbNewOp);
3069 AssertRCReturn(rc, rc);
3070
3071 AssertCompileSize(HMTPRINSTR, 4);
3072 rc = SSMR3PutU32(pSSM, (uint32_t)pPatch->enmType);
3073 AssertRCReturn(rc, rc);
3074
3075 rc = SSMR3PutU32(pSSM, pPatch->uSrcOperand);
3076 AssertRCReturn(rc, rc);
3077
3078 rc = SSMR3PutU32(pSSM, pPatch->uDstOperand);
3079 AssertRCReturn(rc, rc);
3080
3081 rc = SSMR3PutU32(pSSM, pPatch->pJumpTarget);
3082 AssertRCReturn(rc, rc);
3083
3084 rc = SSMR3PutU32(pSSM, pPatch->cFaults);
3085 AssertRCReturn(rc, rc);
3086 }
3087#endif
3088 return VINF_SUCCESS;
3089}
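
/*
 * For context, this save callback and hmR3Load below are hooked into SSM at
 * init time. A condensed sketch of that registration follows; the unit name
 * and exact argument layout are assumptions here, the authoritative call
 * lives in HMR3Init().
 */
#if 0
rc = SSMR3RegisterInternal(pVM, "HWACCM" /* assumed unit name */, 0 /* uInstance */,
                           HM_SSM_VERSION, sizeof(HM), NULL /* pszBefore */,
                           NULL, NULL, NULL,      /* live prep/exec/vote */
                           NULL, hmR3Save, NULL,  /* save prep/exec/done */
                           NULL, hmR3Load, NULL); /* load prep/exec/done */
AssertRCReturn(rc, rc);
#endif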
3090
3091
3092/**
3093 * Execute state load operation.
3094 *
3095 * @returns VBox status code.
3096 * @param pVM Pointer to the VM.
3097 * @param pSSM SSM operation handle.
3098 * @param uVersion Data layout version.
3099 * @param uPass The data pass.
3100 */
3101static DECLCALLBACK(int) hmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
3102{
3103 int rc;
3104
3105 Log(("hmR3Load:\n"));
3106 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
3107
3108 /*
3109 * Validate version.
3110 */
3111 if ( uVersion != HM_SSM_VERSION
3112 && uVersion != HM_SSM_VERSION_NO_PATCHING
3113 && uVersion != HM_SSM_VERSION_2_0_X)
3114 {
3115 AssertMsgFailed(("hmR3Load: Invalid version uVersion=%d!\n", uVersion));
3116 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
3117 }
3118 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3119 {
3120 rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hm.s.Event.fPending);
3121 AssertRCReturn(rc, rc);
3122 rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hm.s.Event.u32ErrCode);
3123 AssertRCReturn(rc, rc);
3124 rc = SSMR3GetU64(pSSM, &pVM->aCpus[i].hm.s.Event.u64IntrInfo);
3125 AssertRCReturn(rc, rc);
3126
3127 if (uVersion >= HM_SSM_VERSION_NO_PATCHING)
3128 {
3129 uint32_t val;
3130
3131#ifdef VBOX_WITH_OLD_VTX_CODE
3132 rc = SSMR3GetU32(pSSM, &val);
3133 AssertRCReturn(rc, rc);
3134 pVM->aCpus[i].hm.s.vmx.enmLastSeenGuestMode = (PGMMODE)val;
3135
3136 rc = SSMR3GetU32(pSSM, &val);
3137 AssertRCReturn(rc, rc);
3138 pVM->aCpus[i].hm.s.vmx.enmCurrGuestMode = (PGMMODE)val;
3139
3140 rc = SSMR3GetU32(pSSM, &val);
3141 AssertRCReturn(rc, rc);
3142 pVM->aCpus[i].hm.s.vmx.enmPrevGuestMode = (PGMMODE)val;
3143#else
3144 //@todo: See note above re saving enmLastSeenGuestMode
3145 rc = SSMR3GetU32(pSSM, &val);
3146 AssertRCReturn(rc, rc);
3147 rc = SSMR3GetU32(pSSM, &val);
3148 AssertRCReturn(rc, rc);
3149 rc = SSMR3GetU32(pSSM, &val);
3150 AssertRCReturn(rc, rc);
3151#endif
3152 }
3153 }
3154#ifdef VBOX_HM_WITH_GUEST_PATCHING
3155 if (uVersion > HM_SSM_VERSION_NO_PATCHING)
3156 {
3157 rc = SSMR3GetGCPtr(pSSM, &pVM->hm.s.pGuestPatchMem);
3158 AssertRCReturn(rc, rc);
3159 rc = SSMR3GetGCPtr(pSSM, &pVM->hm.s.pFreeGuestPatchMem);
3160 AssertRCReturn(rc, rc);
3161 rc = SSMR3GetU32(pSSM, &pVM->hm.s.cbGuestPatchMem);
3162 AssertRCReturn(rc, rc);
3163
3164 /* Fetch all TPR patch records. */
3165 rc = SSMR3GetU32(pSSM, &pVM->hm.s.cPatches);
3166 AssertRCReturn(rc, rc);
3167
3168 for (unsigned i = 0; i < pVM->hm.s.cPatches; i++)
3169 {
3170 PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
3171
3172 rc = SSMR3GetU32(pSSM, &pPatch->Core.Key);
3173 AssertRCReturn(rc, rc);
3174
3175 rc = SSMR3GetMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
3176 AssertRCReturn(rc, rc);
3177
3178 rc = SSMR3GetU32(pSSM, &pPatch->cbOp);
3179 AssertRCReturn(rc, rc);
3180
3181 rc = SSMR3GetMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
3182 AssertRCReturn(rc, rc);
3183
3184 rc = SSMR3GetU32(pSSM, &pPatch->cbNewOp);
3185 AssertRCReturn(rc, rc);
3186
3187 rc = SSMR3GetU32(pSSM, (uint32_t *)&pPatch->enmType);
3188 AssertRCReturn(rc, rc);
3189
3190 if (pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT)
3191 pVM->hm.s.fTPRPatchingActive = true;
3192
3193 Assert(pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT || pVM->hm.s.fTPRPatchingActive == false);
3194
3195 rc = SSMR3GetU32(pSSM, &pPatch->uSrcOperand);
3196 AssertRCReturn(rc, rc);
3197
3198 rc = SSMR3GetU32(pSSM, &pPatch->uDstOperand);
3199 AssertRCReturn(rc, rc);
3200
3201 rc = SSMR3GetU32(pSSM, &pPatch->cFaults);
3202 AssertRCReturn(rc, rc);
3203
3204 rc = SSMR3GetU32(pSSM, &pPatch->pJumpTarget);
3205 AssertRCReturn(rc, rc);
3206
3207 Log(("hmR3Load: patch %d\n", i));
3208 Log(("Key = %x\n", pPatch->Core.Key));
3209 Log(("cbOp = %d\n", pPatch->cbOp));
3210 Log(("cbNewOp = %d\n", pPatch->cbNewOp));
3211 Log(("type = %d\n", pPatch->enmType));
3212 Log(("srcop = %d\n", pPatch->uSrcOperand));
3213 Log(("dstop = %d\n", pPatch->uDstOperand));
3214 Log(("cFaults = %d\n", pPatch->cFaults));
3215 Log(("target = %x\n", pPatch->pJumpTarget));
3216 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
3217 AssertRC(rc);
3218 }
3219 }
3220#endif
3221
3222 return VINF_SUCCESS;
3223}
3224