VirtualBox

source: vbox/trunk/src/VBox/VMM/HWACCM.cpp@4811

Last change on this file since 4811 was 4811, checked in by vboxsync, 17 years ago

Split VMMR0Entry into VMMR0EntryInt, VMMR0EntryFast and VMMR0EntryEx. This will prevent the SUPCallVMMR0Ex path from causing harm and messing up the paths that have to be optimized.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 33.9 KB
/* $Id: HWACCM.cpp 4811 2007-09-14 17:53:56Z vboxsync $ */
/** @file
 * HWACCM - Intel/AMD VM Hardware Support Manager
 */

/*
 * Copyright (C) 2006-2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HWACCM
#include <VBox/cpum.h>
#include <VBox/stam.h>
#include <VBox/mm.h>
#include <VBox/pdm.h>
#include <VBox/pgm.h>
#include <VBox/trpm.h>
#include <VBox/dbgf.h>
#include <VBox/hwacc_vmx.h>
#include <VBox/hwacc_svm.h>
#include "HWACCMInternal.h"
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/patm.h>
#include <VBox/csam.h>
#include <VBox/selm.h>

#include <iprt/assert.h>
#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/thread.h>


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int) hwaccmR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) hwaccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);


/**
 * Initializes the HWACCM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR3DECL(int) HWACCMR3Init(PVM pVM)
{
    LogFlow(("HWACCMR3Init\n"));

    /*
     * Assert alignment and sizes.
     */
    AssertRelease(!(RT_OFFSETOF(VM, hwaccm.s) & 31));
    AssertRelease(sizeof(pVM->hwaccm.s) <= sizeof(pVM->hwaccm.padding));

    /* Some structure checks. */
    AssertMsg(RT_OFFSETOF(SVM_VMCB, u8Reserved3) == 0xC0, ("u8Reserved3 offset = %x\n", RT_OFFSETOF(SVM_VMCB, u8Reserved3)));
    AssertMsg(RT_OFFSETOF(SVM_VMCB, ctrl.EventInject) == 0xA8, ("ctrl.EventInject offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.EventInject)));
    AssertMsg(RT_OFFSETOF(SVM_VMCB, ctrl.ExitIntInfo) == 0x88, ("ctrl.ExitIntInfo offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.ExitIntInfo)));
    AssertMsg(RT_OFFSETOF(SVM_VMCB, ctrl.TLBCtrl) == 0x58, ("ctrl.TLBCtrl offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.TLBCtrl)));

    AssertMsg(RT_OFFSETOF(SVM_VMCB, guest) == 0x400, ("guest offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest)));
    AssertMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved4) == 0x4A0, ("guest.u8Reserved4 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved4)));
    AssertMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved6) == 0x4D8, ("guest.u8Reserved6 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved6)));
    AssertMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved7) == 0x580, ("guest.u8Reserved7 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved7)));
    AssertMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved9) == 0x648, ("guest.u8Reserved9 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved9)));
    AssertMsg(RT_OFFSETOF(SVM_VMCB, u8Reserved10) == 0x698, ("u8Reserved10 offset = %x\n", RT_OFFSETOF(SVM_VMCB, u8Reserved10)));
    AssertMsg(sizeof(SVM_VMCB) == 0x1000, ("SVM_VMCB size = %x\n", sizeof(SVM_VMCB)));
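    /* These layout asserts pin SVM_VMCB to the offsets mandated by the AMD
       programmer's manual, so a header change that shifts a field is caught
       here at init time rather than as an obscure VMRUN failure. */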


    /*
     * Register the saved state data unit.
     */
    int rc = SSMR3RegisterInternal(pVM, "HWACCM", 0, HWACCM_SSM_VERSION, sizeof(HWACCM),
                                   NULL, hwaccmR3Save, NULL,
                                   NULL, hwaccmR3Load, NULL);
    if (VBOX_FAILURE(rc))
        return rc;

    /** @todo Make sure both pages are either not accessible or readonly! */
    /* Allocate one page for VMXON. */
    pVM->hwaccm.s.vmx.pVMXON = SUPContAlloc(1, &pVM->hwaccm.s.vmx.pVMXONPhys);
    if (pVM->hwaccm.s.vmx.pVMXON == 0)
    {
        AssertMsgFailed(("SUPContAlloc failed!!\n"));
        return VERR_NO_MEMORY;
    }
    memset(pVM->hwaccm.s.vmx.pVMXON, 0, PAGE_SIZE);
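    /* Note: per the Intel specs the VMXON region is a 4 KB, page-aligned block
       whose first 32 bits must contain the VMCS revision identifier before
       VMXON is executed; filling that in is presumably left to the ring-0 side
       of the VMMR0_DO_HWACC_SETUP_VM operation (see HWACCMR3Relocate below). */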

    /* Allocate one page for the VM control structure (VMCS). */
    pVM->hwaccm.s.vmx.pVMCS = SUPContAlloc(1, &pVM->hwaccm.s.vmx.pVMCSPhys);
    if (pVM->hwaccm.s.vmx.pVMCS == 0)
    {
        AssertMsgFailed(("SUPContAlloc failed!!\n"));
        return VERR_NO_MEMORY;
    }
    memset(pVM->hwaccm.s.vmx.pVMCS, 0, PAGE_SIZE);

    /* Allocate one page for the TSS we need for real mode emulation. */
    pVM->hwaccm.s.vmx.pRealModeTSS = (PVBOXTSS)SUPContAlloc(1, &pVM->hwaccm.s.vmx.pRealModeTSSPhys);
    if (pVM->hwaccm.s.vmx.pRealModeTSS == 0)
    {
        AssertMsgFailed(("SUPContAlloc failed!!\n"));
        return VERR_NO_MEMORY;
    }
    /* We initialize it properly later, as we can reuse it for SVM. */
    memset(pVM->hwaccm.s.vmx.pRealModeTSS, 0, PAGE_SIZE);

    /* Reuse those three pages for AMD SVM. (Only one of VMX/SVM is ever active; never both.) */
    pVM->hwaccm.s.svm.pHState       = pVM->hwaccm.s.vmx.pVMXON;
    pVM->hwaccm.s.svm.pHStatePhys   = pVM->hwaccm.s.vmx.pVMXONPhys;
    pVM->hwaccm.s.svm.pVMCB         = pVM->hwaccm.s.vmx.pVMCS;
    pVM->hwaccm.s.svm.pVMCBPhys     = pVM->hwaccm.s.vmx.pVMCSPhys;
    pVM->hwaccm.s.svm.pVMCBHost     = pVM->hwaccm.s.vmx.pRealModeTSS;
    pVM->hwaccm.s.svm.pVMCBHostPhys = pVM->hwaccm.s.vmx.pRealModeTSSPhys;

    /* Allocate 12 KB for the IO bitmap (there doesn't seem to be a way to convince SVM not to use it). */
    pVM->hwaccm.s.svm.pIOBitmap = SUPContAlloc(3, &pVM->hwaccm.s.svm.pIOBitmapPhys);
    if (pVM->hwaccm.s.svm.pIOBitmap == 0)
    {
        AssertMsgFailed(("SUPContAlloc failed!!\n"));
        return VERR_NO_MEMORY;
    }
    /* Set all bits to intercept all IO accesses. */
    memset(pVM->hwaccm.s.svm.pIOBitmap, 0xff, PAGE_SIZE*3);
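    /* Note: 12 KB is the I/O permission map size the AMD specs call for: one
       intercept bit per port covers the 64K port space in 8 KB, and the extra
       page keeps multi-byte accesses near port 0xFFFF well defined. With every
       bit set, all guest I/O exits to the hypervisor. */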

    /* Allocate 8 KB for the MSR bitmap (there doesn't seem to be a way to convince SVM not to use it). */
    pVM->hwaccm.s.svm.pMSRBitmap = SUPContAlloc(2, &pVM->hwaccm.s.svm.pMSRBitmapPhys);
    if (pVM->hwaccm.s.svm.pMSRBitmap == 0)
    {
        AssertMsgFailed(("SUPContAlloc failed!!\n"));
        return VERR_NO_MEMORY;
    }
    /* Set all bits to intercept all MSR accesses. */
    memset(pVM->hwaccm.s.svm.pMSRBitmap, 0xff, PAGE_SIZE*2);
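    /* Note: the SVM MSR permission map carries two bits per MSR (a read and a
       write intercept) for the architecturally defined MSR ranges; with every
       bit set, all guest RDMSR/WRMSR accesses cause a #VMEXIT. */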

    /* Misc initialisation. */
    pVM->hwaccm.s.vmx.fSupported = false;
    pVM->hwaccm.s.svm.fSupported = false;
    pVM->hwaccm.s.vmx.fEnabled   = false;
    pVM->hwaccm.s.svm.fEnabled   = false;

    pVM->hwaccm.s.fActive = false;

    /* On first entry we'll sync everything. */
    pVM->hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;

    pVM->hwaccm.s.vmx.cr0_mask = 0;
    pVM->hwaccm.s.vmx.cr4_mask = 0;

    /*
     * Statistics.
     */
    STAM_REG(pVM, &pVM->hwaccm.s.StatEntry, STAMTYPE_PROFILE, "/PROF/HWACCM/SwitchToGC", STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode entry");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExit, STAMTYPE_PROFILE, "/PROF/HWACCM/SwitchFromGC", STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode exit");
    STAM_REG(pVM, &pVM->hwaccm.s.StatInGC, STAMTYPE_PROFILE, "/PROF/HWACCM/InGC", STAMUNIT_TICKS_PER_CALL, "Profiling of vmlaunch");

    STAM_REG(pVM, &pVM->hwaccm.s.StatExitShadowNM, STAMTYPE_COUNTER, "/HWACCM/Exit/Trap/Shadow/#NM", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitGuestNM, STAMTYPE_COUNTER, "/HWACCM/Exit/Trap/Guest/#NM", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitShadowPF, STAMTYPE_COUNTER, "/HWACCM/Exit/Trap/Shadow/#PF", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitGuestPF, STAMTYPE_COUNTER, "/HWACCM/Exit/Trap/Guest/#PF", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitGuestUD, STAMTYPE_COUNTER, "/HWACCM/Exit/Trap/Guest/#UD", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitGuestSS, STAMTYPE_COUNTER, "/HWACCM/Exit/Trap/Guest/#SS", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitGuestNP, STAMTYPE_COUNTER, "/HWACCM/Exit/Trap/Guest/#NP", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitGuestGP, STAMTYPE_COUNTER, "/HWACCM/Exit/Trap/Guest/#GP", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitGuestMF, STAMTYPE_COUNTER, "/HWACCM/Exit/Trap/Guest/#MF", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitGuestDE, STAMTYPE_COUNTER, "/HWACCM/Exit/Trap/Guest/#DE", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitInvpg, STAMTYPE_COUNTER, "/HWACCM/Exit/Instr/Invlpg", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitInvd, STAMTYPE_COUNTER, "/HWACCM/Exit/Instr/Invd", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitCpuid, STAMTYPE_COUNTER, "/HWACCM/Exit/Instr/Cpuid", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitRdtsc, STAMTYPE_COUNTER, "/HWACCM/Exit/Instr/Rdtsc", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitCRxWrite, STAMTYPE_COUNTER, "/HWACCM/Exit/Instr/CRx/Write", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitCRxRead, STAMTYPE_COUNTER, "/HWACCM/Exit/Instr/CRx/Read", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitDRxWrite, STAMTYPE_COUNTER, "/HWACCM/Exit/Instr/DRx/Write", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitDRxRead, STAMTYPE_COUNTER, "/HWACCM/Exit/Instr/DRx/Read", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitCLTS, STAMTYPE_COUNTER, "/HWACCM/Exit/Instr/CLTS", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitLMSW, STAMTYPE_COUNTER, "/HWACCM/Exit/Instr/LMSW", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitIOWrite, STAMTYPE_COUNTER, "/HWACCM/Exit/IO/Write", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitIORead, STAMTYPE_COUNTER, "/HWACCM/Exit/IO/Read", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitIOStringWrite, STAMTYPE_COUNTER, "/HWACCM/Exit/IO/WriteString", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitIOStringRead, STAMTYPE_COUNTER, "/HWACCM/Exit/IO/ReadString", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitIrqWindow, STAMTYPE_COUNTER, "/HWACCM/Exit/GuestIrq/Pending", STAMUNIT_OCCURENCES, "Nr of occurrences");

    STAM_REG(pVM, &pVM->hwaccm.s.StatSwitchGuestIrq, STAMTYPE_COUNTER, "/HWACCM/Switch/IrqPending", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatSwitchToR3, STAMTYPE_COUNTER, "/HWACCM/Switch/ToR3", STAMUNIT_OCCURENCES, "Nr of occurrences");

    STAM_REG(pVM, &pVM->hwaccm.s.StatIntInject, STAMTYPE_COUNTER, "/HWACCM/Irq/Inject", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatIntReinject, STAMTYPE_COUNTER, "/HWACCM/Irq/Reinject", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatPendingHostIrq, STAMTYPE_COUNTER, "/HWACCM/Irq/PendingOnHost", STAMUNIT_OCCURENCES, "Nr of occurrences");

    pVM->hwaccm.s.pStatExitReason = 0;

#ifdef VBOX_WITH_STATISTICS
    rc = MMHyperAlloc(pVM, MAX_EXITREASON_STAT*sizeof(*pVM->hwaccm.s.pStatExitReason), 0, MM_TAG_HWACCM, (void **)&pVM->hwaccm.s.pStatExitReason);
    AssertRC(rc);
    if (VBOX_SUCCESS(rc))
    {
        for (int i = 0; i < MAX_EXITREASON_STAT; i++)
        {
            char szName[64];
            RTStrPrintf(szName, sizeof(szName), "/HWACCM/Exit/Reason/%02x", i);
            int rc = STAMR3Register(pVM, &pVM->hwaccm.s.pStatExitReason[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "Exit reason");
            AssertRC(rc);
        }
    }
    pVM->hwaccm.s.pStatExitReasonR0 = MMHyperR3ToR0(pVM, pVM->hwaccm.s.pStatExitReason);
    Assert(pVM->hwaccm.s.pStatExitReasonR0);
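    /* Note: pStatExitReasonR0 is the ring-0 view of the same hyper-heap array,
       letting the ring-0 exit handlers bump the per-reason counters directly
       without a round trip to ring 3. */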
#endif

    /* Disabled by default. */
    pVM->fHWACCMEnabled = false;

    /* HWACCM support must be explicitly enabled in the configuration file. */
    pVM->hwaccm.s.fAllowed = false;
    CFGMR3QueryBool(CFGMR3GetChild(CFGMR3GetRoot(pVM), "HWVirtExt/"), "Enabled", &pVM->hwaccm.s.fAllowed);
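    /* Illustrative only -- the "HWVirtExt/" node is created elsewhere by the
       VM configuration constructor, roughly along these lines (hypothetical
       setup code, not part of this file):
           PCFGMNODE pHWVirtExt;
           CFGMR3InsertNode(CFGMR3GetRoot(pVM), "HWVirtExt/", &pHWVirtExt);
           CFGMR3InsertInteger(pHWVirtExt, "Enabled", 1);
       If the key is absent, CFGMR3QueryBool fails without touching fAllowed,
       which therefore stays false. */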

    return VINF_SUCCESS;
}


/**
 * Turns off normal raw mode features.
 *
 * @param   pVM         The VM to operate on.
 */
static void hwaccmr3DisableRawMode(PVM pVM)
{
    /* Disable PATM & CSAM. */
    PATMR3AllowPatching(pVM, false);
    CSAMDisableScanning(pVM);

    /* Turn off IDT/LDT/GDT and TSS monitoring and syncing. */
    SELMR3DisableMonitoring(pVM);
    TRPMR3DisableMonitoring(pVM);

    /* The hidden selector registers are now valid. */
    CPUMSetHiddenSelRegsValid(pVM, true);

    /* Disable the switcher code (safety precaution). */
    VMMR3DisableSwitcher(pVM);

    /* Disable mapping of the hypervisor into the shadow page table. */
    PGMR3ChangeShwPDMappings(pVM, false);
}

/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * @param   pVM         The VM.
 */
HWACCMR3DECL(void) HWACCMR3Relocate(PVM pVM)
{
#ifdef LOG_ENABLED
    Log(("HWACCMR3Relocate to %VGv\n", MMHyperGetArea(pVM, 0)));
#endif

    if (pVM->hwaccm.s.fAllowed == false)
        return;

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        Log(("pVM->hwaccm.s.vmx.fSupported = %d\n", pVM->hwaccm.s.vmx.fSupported));

        if (    pVM->hwaccm.s.fInitialized == false
            &&  pVM->hwaccm.s.vmx.msr.feature_ctrl != 0)
        {
            uint64_t val;

            LogRel(("HWACCM: Host CR4=%08X\n", pVM->hwaccm.s.vmx.hostCR4));
            LogRel(("HWACCM: MSR_IA32_FEATURE_CONTROL = %VX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));
            LogRel(("HWACCM: MSR_IA32_VMX_BASIC_INFO = %VX64\n", pVM->hwaccm.s.vmx.msr.vmx_basic_info));
            LogRel(("HWACCM: VMCS id = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
            LogRel(("HWACCM: VMCS size = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
            LogRel(("HWACCM: VMCS physical address limit = %s\n", MSR_IA32_VMX_BASIC_INFO_VMCS_PHYS_WIDTH(pVM->hwaccm.s.vmx.msr.vmx_basic_info) ? "< 4 GB" : "None"));
            LogRel(("HWACCM: VMCS memory type = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_MEM_TYPE(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
            LogRel(("HWACCM: Dual monitor treatment = %d\n", MSR_IA32_VMX_BASIC_INFO_VMCS_DUAL_MON(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));

            LogRel(("HWACCM: MSR_IA32_VMX_PINBASED_CTLS = %VX64\n", pVM->hwaccm.s.vmx.msr.vmx_pin_ctls));
            val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls >> 32ULL;
            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT\n"));
            val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls;
            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT *must* be set\n"));

            LogRel(("HWACCM: MSR_IA32_VMX_PROCBASED_CTLS = %VX64\n", pVM->hwaccm.s.vmx.msr.vmx_proc_ctls));
            val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls >> 32ULL;
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT\n"));
            val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls;
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT *must* be set\n"));
            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
                LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT *must* be set\n"));

            LogRel(("HWACCM: MSR_IA32_VMX_ENTRY_CTLS = %VX64\n", pVM->hwaccm.s.vmx.msr.vmx_entry));
            val = pVM->hwaccm.s.vmx.msr.vmx_entry >> 32ULL;
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE)
                LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM)
                LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON)
                LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON\n"));
            val = pVM->hwaccm.s.vmx.msr.vmx_entry;
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE)
                LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE *must* be set\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM)
                LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM *must* be set\n"));
            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON)
                LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON *must* be set\n"));

            LogRel(("HWACCM: MSR_IA32_VMX_EXIT_CTLS = %VX64\n", pVM->hwaccm.s.vmx.msr.vmx_exit));
            val = pVM->hwaccm.s.vmx.msr.vmx_exit >> 32ULL;
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64)
                LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ)
                LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ\n"));
            val = pVM->hwaccm.s.vmx.msr.vmx_exit;
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64)
                LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64 *must* be set\n"));
            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ)
                LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ *must* be set\n"));

            LogRel(("HWACCM: MSR_IA32_VMX_MISC = %VX64\n", pVM->hwaccm.s.vmx.msr.vmx_misc));
            LogRel(("HWACCM: MSR_IA32_VMX_MISC_ACTIVITY_STATES %x\n", MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hwaccm.s.vmx.msr.vmx_misc)));
            LogRel(("HWACCM: MSR_IA32_VMX_MISC_CR3_TARGET %x\n", MSR_IA32_VMX_MISC_CR3_TARGET(pVM->hwaccm.s.vmx.msr.vmx_misc)));
            LogRel(("HWACCM: MSR_IA32_VMX_MISC_MAX_MSR %x\n", MSR_IA32_VMX_MISC_MAX_MSR(pVM->hwaccm.s.vmx.msr.vmx_misc)));
            LogRel(("HWACCM: MSR_IA32_VMX_MISC_MSEG_ID %x\n", MSR_IA32_VMX_MISC_MSEG_ID(pVM->hwaccm.s.vmx.msr.vmx_misc)));

            LogRel(("HWACCM: MSR_IA32_VMX_CR0_FIXED0 = %VX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0));
            LogRel(("HWACCM: MSR_IA32_VMX_CR0_FIXED1 = %VX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1));
            LogRel(("HWACCM: MSR_IA32_VMX_CR4_FIXED0 = %VX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0));
            LogRel(("HWACCM: MSR_IA32_VMX_CR4_FIXED1 = %VX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1));
            LogRel(("HWACCM: MSR_IA32_VMX_VMCS_ENUM = %VX64\n", pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum));

            /* Only try once. */
            pVM->hwaccm.s.fInitialized = true;

            /* The I/O bitmap starts right after the virtual interrupt redirection bitmap. Outside the TSS on purpose;
             * the CPU will not check it for I/O operations. */
            pVM->hwaccm.s.vmx.pRealModeTSS->offIoBitmap = sizeof(*pVM->hwaccm.s.vmx.pRealModeTSS);
            /* Bit set to 0 means redirection enabled. */
            memset(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap, 0x0, sizeof(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap));
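            /* This ring-0 call runs through the SUPCallVMMR0Ex/VMMR0EntryEx
               path (split off from VMMR0Entry in this change, see the log
               message above) and performs the actual host-side VMX setup. */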
            int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
            AssertRC(rc);
            if (rc == VINF_SUCCESS)
            {
                hwaccmr3DisableRawMode(pVM);

                pVM->fHWACCMEnabled = true;
                pVM->hwaccm.s.vmx.fEnabled = true;
                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
                LogRel(("HWACCM: VMX enabled!\n"));
            }
            else
            {
                LogRel(("HWACCM: VMX setup failed with rc=%Vrc!\n", rc));
                LogRel(("HWACCM: Last instruction error %x\n", pVM->hwaccm.s.vmx.ulLastInstrError));
                pVM->fHWACCMEnabled = false;
            }
        }
    }
    else
    if (pVM->hwaccm.s.svm.fSupported)
    {
        Log(("pVM->hwaccm.s.svm.fSupported = %d\n", pVM->hwaccm.s.svm.fSupported));

        if (pVM->hwaccm.s.fInitialized == false)
        {
            LogRel(("HWACCM: cpuid 0x80000001.u32AMDFeatureECX = %VX32\n", pVM->hwaccm.s.cpuid.u32AMDFeatureECX));
            LogRel(("HWACCM: cpuid 0x80000001.u32AMDFeatureEDX = %VX32\n", pVM->hwaccm.s.cpuid.u32AMDFeatureEDX));
            LogRel(("HWACCM: SVM revision = %X\n", pVM->hwaccm.s.svm.u32Rev));
            LogRel(("HWACCM: SVM max ASID = %d\n", pVM->hwaccm.s.svm.u32MaxASID));

            /* Only try once. */
            pVM->hwaccm.s.fInitialized = true;

            int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
            AssertRC(rc);
            if (rc == VINF_SUCCESS)
            {
                hwaccmr3DisableRawMode(pVM);
                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);

                pVM->fHWACCMEnabled = true;
                pVM->hwaccm.s.svm.fEnabled = true;
            }
            else
            {
                pVM->fHWACCMEnabled = false;
            }
        }
    }
    else
    if (pVM->hwaccm.s.fHWACCMR0Init)
    {
        LogRel(("HWACCM: No VMX or SVM CPU extension found. Reason %Vrc\n", pVM->hwaccm.s.lLastError));
        LogRel(("HWACCM: VMX MSR_IA32_FEATURE_CONTROL=%VX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));
    }
}


/**
 * Checks whether hardware accelerated raw mode is allowed.
 *
 * @returns boolean
 * @param   pVM         The VM to operate on.
 */
HWACCMR3DECL(bool) HWACCMR3IsAllowed(PVM pVM)
{
    return pVM->hwaccm.s.fAllowed;
}


/**
 * Notification callback which is called whenever there is a chance that a CR3
 * value might have changed.
 * This is called by PGM.
 *
 * @param   pVM             The VM to operate on.
 * @param   enmShadowMode   New paging mode.
 */
HWACCMR3DECL(void) HWACCMR3PagingModeChanged(PVM pVM, PGMMODE enmShadowMode)
{
    pVM->hwaccm.s.enmShadowMode = enmShadowMode;
}

/**
 * Terminates the HWACCM.
 *
 * Termination means cleaning up and freeing all resources;
 * the VM itself is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR3DECL(int) HWACCMR3Term(PVM pVM)
{
    if (pVM->hwaccm.s.pStatExitReason)
    {
        MMHyperFree(pVM, pVM->hwaccm.s.pStatExitReason);
        pVM->hwaccm.s.pStatExitReason = 0;
    }

    if (pVM->hwaccm.s.vmx.pVMXON)
    {
        SUPContFree(pVM->hwaccm.s.vmx.pVMXON, 1);
        pVM->hwaccm.s.vmx.pVMXON = 0;
    }
    if (pVM->hwaccm.s.vmx.pVMCS)
    {
        SUPContFree(pVM->hwaccm.s.vmx.pVMCS, 1);
        pVM->hwaccm.s.vmx.pVMCS = 0;
    }
    if (pVM->hwaccm.s.vmx.pRealModeTSS)
    {
        SUPContFree(pVM->hwaccm.s.vmx.pRealModeTSS, 1);
        pVM->hwaccm.s.vmx.pRealModeTSS = 0;
    }
    if (pVM->hwaccm.s.svm.pIOBitmap)
    {
        SUPContFree(pVM->hwaccm.s.svm.pIOBitmap, 3);
        pVM->hwaccm.s.svm.pIOBitmap = 0;
    }
    if (pVM->hwaccm.s.svm.pMSRBitmap)
    {
        SUPContFree(pVM->hwaccm.s.svm.pMSRBitmap, 2);
        pVM->hwaccm.s.svm.pMSRBitmap = 0;
    }
    return 0;
}


/**
 * The VM is being reset.
 *
 * For the HWACCM component this means that any GDT/LDT/TSS monitors
 * need to be removed.
 *
 * @param   pVM     VM handle.
 */
HWACCMR3DECL(void) HWACCMR3Reset(PVM pVM)
{
    LogFlow(("HWACCMR3Reset:\n"));

    if (pVM->fHWACCMEnabled)
        hwaccmr3DisableRawMode(pVM);

    /* On first entry we'll sync everything. */
    pVM->hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;

    pVM->hwaccm.s.vmx.cr0_mask = 0;
    pVM->hwaccm.s.vmx.cr4_mask = 0;

    pVM->hwaccm.s.Event.fPending = false;
}

/**
 * Checks if we can currently use hardware accelerated raw mode.
 *
 * @returns boolean
 * @param   pVM         The VM to operate on.
 * @param   pCtx        Partial VM execution context.
 */
HWACCMR3DECL(bool) HWACCMR3CanExecuteGuest(PVM pVM, PCPUMCTX pCtx)
{
    uint32_t mask;

    Assert(pVM->fHWACCMEnabled);

    /* AMD SVM supports real & protected mode with or without paging. */
    if (pVM->hwaccm.s.svm.fEnabled)
    {
        pVM->hwaccm.s.fActive = true;
        return true;
    }

    /** @todo We can support real mode by using v86 mode and protected mode
     *        without paging with identity mapped pages (but do we really care?). */

    pVM->hwaccm.s.fActive = false;

    /** @note The context supplied by REM is partial. If we add more checks here, be sure to verify that REM provides this info! */

#ifndef HWACCM_VMX_EMULATE_ALL
    /* Too early for VMX. */
    if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr == 0)
        return false;

    /* The guest is about to complete the switch to protected mode. Wait a bit longer. */
    if (pCtx->csHid.Attr.n.u1Present == 0)
        return false;
    if (pCtx->ssHid.Attr.n.u1Present == 0)
        return false;

    /** @todo If we remove this check, the Windows XP install fails during the text-mode phase. */
    if (!(pCtx->cr0 & X86_CR0_WRITE_PROTECT))
        return false;
#endif

    if (pVM->hwaccm.s.vmx.fEnabled)
    {
        /* If bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
        mask = (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0;
        /* Note: We ignore the NE bit here on purpose; see vmmr0\hwaccmr0.cpp for details. */
        mask &= ~X86_CR0_NE;
#ifdef HWACCM_VMX_EMULATE_ALL
        /* Note: We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
        mask &= ~(X86_CR0_PG|X86_CR0_PE);
#endif
        if ((pCtx->cr0 & mask) != mask)
            return false;

        /* If bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
        mask = (uint32_t)~pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1;
        if ((pCtx->cr0 & mask) != 0)
            return false;

        /* If bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
        mask  = (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0;
        mask &= ~X86_CR4_VMXE;
        if ((pCtx->cr4 & mask) != mask)
            return false;

        /* If bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
        mask = (uint32_t)~pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1;
        if ((pCtx->cr4 & mask) != 0)
            return false;
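        /* Worked example with illustrative values (the real ones are CPU
           dependent): if vmx_cr0_fixed0 were 0x80000021 (PG|NE|PE), then after
           masking out NE the guest would need CR0.PE and CR0.PG set before we
           run it here; and since vmx_cr4_fixed0 typically demands CR4.VMXE,
           that bit is masked out above -- the guest must not be forced to know
           it runs under VMX. */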

        pVM->hwaccm.s.fActive = true;
        return true;
    }

    return false;
}

/**
 * Checks if we are currently using hardware accelerated raw mode.
 *
 * @returns boolean
 * @param   pVM         The VM to operate on.
 */
HWACCMR3DECL(bool) HWACCMR3IsActive(PVM pVM)
{
    return pVM->hwaccm.s.fActive;
}

/**
 * Checks if internal events are pending. In that case we are not allowed to dispatch interrupts.
 *
 * @returns boolean
 * @param   pVM         The VM to operate on.
 */
HWACCMR3DECL(bool) HWACCMR3IsEventPending(PVM pVM)
{
    return HWACCMIsEnabled(pVM) && pVM->hwaccm.s.Event.fPending;
}

/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) hwaccmR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    int rc;

    Log(("hwaccmR3Save:\n"));

    /*
     * Save the basic bits - fortunately all the other things can be resynced on load.
     */
    rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.Event.fPending);
    AssertRCReturn(rc, rc);
    rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.Event.errCode);
    AssertRCReturn(rc, rc);
    rc = SSMR3PutU64(pSSM, pVM->hwaccm.s.Event.intInfo);
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   u32Version  Data layout version.
 */
static DECLCALLBACK(int) hwaccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    int rc;

    Log(("hwaccmR3Load:\n"));

    /*
     * Validate version.
     */
    if (u32Version != HWACCM_SSM_VERSION)
    {
        Log(("hwaccmR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }
    rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.Event.fPending);
    AssertRCReturn(rc, rc);
    rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.Event.errCode);
    AssertRCReturn(rc, rc);
    rc = SSMR3GetU64(pSSM, &pVM->hwaccm.s.Event.intInfo);
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}