/* $Id: HWACCMR0.cpp 28800 2010-04-27 08:22:32Z vboxsync $ */
/** @file
 * HWACCM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HWACCM
#include <VBox/hwaccm.h>
#include <VBox/pgm.h>
#include "HWACCMInternal.h"
#include <VBox/vm.h>
#include <VBox/x86.h>
#include <VBox/hwacc_vmx.h>
#include <VBox/hwacc_svm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/cpuset.h>
#include <iprt/memobj.h>
#include <iprt/param.h>
#include <iprt/power.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include "HWVMXR0.h"
#include "HWSVMR0.h"

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(void) hwaccmR0EnableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2);
static DECLCALLBACK(void) hwaccmR0DisableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2);
static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
static int                hwaccmR0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu);
static DECLCALLBACK(void) hwaccmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvUser);

/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/

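/** Module-global ring-0 HWACCM state: the per-host-CPU info array, the
 *  dispatch table of VT-x/AMD-V ring-0 handlers, and the host capabilities
 *  detected by HWACCMR0Init(). */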
static struct
{
    HWACCM_CPUINFO                  aCpuInfo[RTCPUSET_MAX_CPUS];

    /** Ring 0 handlers for VT-x and AMD-V. */
    DECLR0CALLBACKMEMBER(int, pfnEnterSession,(PVM pVM, PVMCPU pVCpu, PHWACCM_CPUINFO pCpu));
    DECLR0CALLBACKMEMBER(int, pfnLeaveSession,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
    DECLR0CALLBACKMEMBER(int, pfnSaveHostState,(PVM pVM, PVMCPU pVCpu));
    DECLR0CALLBACKMEMBER(int, pfnLoadGuestState,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
    DECLR0CALLBACKMEMBER(int, pfnRunGuestCode,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
    DECLR0CALLBACKMEMBER(int, pfnEnableCpu,(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys));
    DECLR0CALLBACKMEMBER(int, pfnDisableCpu,(PHWACCM_CPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys));
    DECLR0CALLBACKMEMBER(int, pfnInitVM,(PVM pVM));
    DECLR0CALLBACKMEMBER(int, pfnTermVM,(PVM pVM));
    DECLR0CALLBACKMEMBER(int, pfnSetupVM,(PVM pVM));

    /** Maximum ASID allowed. */
    uint32_t                        uMaxASID;

    struct
    {
        /** Set by the ring-0 driver to indicate VMX is supported by the CPU. */
        bool                        fSupported;
        /** Whether we're using SUPR0EnableVTx or not. */
        bool                        fUsingSUPR0EnableVTx;

        /** Host CR4 value (set by ring-0 VMX init). */
        uint64_t                    hostCR4;

        /** Host EFER value (set by ring-0 VMX init). */
        uint64_t                    hostEFER;

        /** VMX MSR values. */
        struct
        {
            uint64_t                feature_ctrl;
            uint64_t                vmx_basic_info;
            VMX_CAPABILITY          vmx_pin_ctls;
            VMX_CAPABILITY          vmx_proc_ctls;
            VMX_CAPABILITY          vmx_proc_ctls2;
            VMX_CAPABILITY          vmx_exit;
            VMX_CAPABILITY          vmx_entry;
            uint64_t                vmx_misc;
            uint64_t                vmx_cr0_fixed0;
            uint64_t                vmx_cr0_fixed1;
            uint64_t                vmx_cr4_fixed0;
            uint64_t                vmx_cr4_fixed1;
            uint64_t                vmx_vmcs_enum;
            uint64_t                vmx_eptcaps;
        } msr;
        /** Last instruction error. */
        uint32_t                    ulLastInstrError;
    } vmx;
    struct
    {
        /** HWCR MSR (for diagnostics). */
        uint64_t                    msrHWCR;

        /** SVM revision. */
        uint32_t                    u32Rev;

        /** SVM feature bits from cpuid 0x8000000a. */
        uint32_t                    u32Features;

        /** Set by the ring-0 driver to indicate SVM is supported by the CPU. */
        bool                        fSupported;
    } svm;
    /** Saved error from detection. */
    int32_t                         lLastError;

    struct
    {
        uint32_t                    u32AMDFeatureECX;
        uint32_t                    u32AMDFeatureEDX;
    } cpuid;

    HWACCMSTATE                     enmHwAccmState;

    bool                            fGlobalInit;
    volatile bool                   fSuspended;
} HWACCMR0Globals;



/**
 * Does global Ring-0 HWACCM initialization.
 *
 * @returns VBox status code.
 */
VMMR0DECL(int) HWACCMR0Init(void)
{
    int     rc;
    bool    fAMDVPresent = false;

    memset(&HWACCMR0Globals, 0, sizeof(HWACCMR0Globals));
    HWACCMR0Globals.enmHwAccmState = HWACCMSTATE_UNINITIALIZED;
    for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
        HWACCMR0Globals.aCpuInfo[i].pMemObj = NIL_RTR0MEMOBJ;

    /* Fill in all callbacks with placeholders. */
    HWACCMR0Globals.pfnEnterSession     = HWACCMR0DummyEnter;
    HWACCMR0Globals.pfnLeaveSession     = HWACCMR0DummyLeave;
    HWACCMR0Globals.pfnSaveHostState    = HWACCMR0DummySaveHostState;
    HWACCMR0Globals.pfnLoadGuestState   = HWACCMR0DummyLoadGuestState;
    HWACCMR0Globals.pfnRunGuestCode     = HWACCMR0DummyRunGuestCode;
    HWACCMR0Globals.pfnEnableCpu        = HWACCMR0DummyEnableCpu;
    HWACCMR0Globals.pfnDisableCpu       = HWACCMR0DummyDisableCpu;
    HWACCMR0Globals.pfnInitVM           = HWACCMR0DummyInitVM;
    HWACCMR0Globals.pfnTermVM           = HWACCMR0DummyTermVM;
    HWACCMR0Globals.pfnSetupVM          = HWACCMR0DummySetupVM;

    /* Default is global VT-x/AMD-V init. */
    HWACCMR0Globals.fGlobalInit = true;

    /*
     * Check for VT-x and AMD-V capabilities.
     */
    if (ASMHasCpuId())
    {
        uint32_t u32FeaturesECX;
        uint32_t u32Dummy;
        uint32_t u32FeaturesEDX;
        uint32_t u32VendorEBX, u32VendorECX, u32VendorEDX;

        ASMCpuId(0, &u32Dummy, &u32VendorEBX, &u32VendorECX, &u32VendorEDX);
        ASMCpuId(1, &u32Dummy, &u32Dummy, &u32FeaturesECX, &u32FeaturesEDX);
        /* Query AMD features. */
        ASMCpuId(0x80000001, &u32Dummy, &u32Dummy, &HWACCMR0Globals.cpuid.u32AMDFeatureECX, &HWACCMR0Globals.cpuid.u32AMDFeatureEDX);

        if (    u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX
            &&  u32VendorECX == X86_CPUID_VENDOR_INTEL_ECX
            &&  u32VendorEDX == X86_CPUID_VENDOR_INTEL_EDX
           )
        {
            /*
             * Read all VMX MSRs if VMX is available. (same goes for RDMSR/WRMSR)
             * We also assume all VMX-enabled CPUs support fxsave/fxrstor.
             */
            if (    (u32FeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
                 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
                 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
               )
            {
                int     aRc[RTCPUSET_MAX_CPUS];
                RTCPUID idCpu = 0;

                HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

                /*
                 * First try to use the native kernel API for controlling VT-x.
                 * (This is only supported by some Mac OS X kernels atm.)
                 */
                HWACCMR0Globals.lLastError = rc = SUPR0EnableVTx(true /* fEnable */);
                if (rc != VERR_NOT_SUPPORTED)
                {
                    AssertMsg(rc == VINF_SUCCESS || rc == VERR_VMX_IN_VMX_ROOT_MODE || rc == VERR_VMX_NO_VMX, ("%Rrc\n", rc));
                    HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx = true;
                    if (RT_SUCCESS(rc))
                    {
                        HWACCMR0Globals.vmx.fSupported = true;
                        rc = SUPR0EnableVTx(false /* fEnable */);
                        AssertRC(rc);
                    }
                }
                else
                {
                    HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx = false;

                    /* We need to check if VT-x has been properly initialized on all CPUs. Some BIOSes do a lousy job. */
                    memset(aRc, 0, sizeof(aRc));
                    HWACCMR0Globals.lLastError = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);

                    /* Check the return code of all invocations. */
                    if (RT_SUCCESS(HWACCMR0Globals.lLastError))
                        HWACCMR0Globals.lLastError = hwaccmR0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
                }
                if (RT_SUCCESS(HWACCMR0Globals.lLastError))
                {
                    /* Reread in case we've changed it. */
                    HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

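                    /* VMXON faults unless the feature control MSR has both the lock bit
                       and the VMXON-enable bit set; HWACCMR0InitCPU (run on each CPU above)
                       tries to set them while the MSR is still unlocked. */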
                    if (    (HWACCMR0Globals.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
                        ==  (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
                    {
                        RTR0MEMOBJ pScratchMemObj;
                        void      *pvScratchPage;
                        RTHCPHYS   pScratchPagePhys;

                        HWACCMR0Globals.vmx.msr.vmx_basic_info  = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
                        HWACCMR0Globals.vmx.msr.vmx_pin_ctls.u  = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_proc_ctls.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_exit.u      = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_entry.u     = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_misc        = ASMRdMsr(MSR_IA32_VMX_MISC);
                        HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
                        HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
                        HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
                        HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
                        HWACCMR0Globals.vmx.msr.vmx_vmcs_enum   = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
                        /* VPID 16 bits ASID. */
                        HWACCMR0Globals.uMaxASID                = 0x10000; /* exclusive */

                        if (HWACCMR0Globals.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
                        {
                            HWACCMR0Globals.vmx.msr.vmx_proc_ctls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
                            if (HWACCMR0Globals.vmx.msr.vmx_proc_ctls2.n.allowed1 & (VMX_VMCS_CTRL_PROC_EXEC2_EPT|VMX_VMCS_CTRL_PROC_EXEC2_VPID))
                                HWACCMR0Globals.vmx.msr.vmx_eptcaps = ASMRdMsr(MSR_IA32_VMX_EPT_CAPS);
                        }

                        if (!HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
                        {
                            HWACCMR0Globals.vmx.hostCR4  = ASMGetCR4();
                            HWACCMR0Globals.vmx.hostEFER = ASMRdMsr(MSR_K6_EFER);

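                            /* Allocate a scratch page to serve as the VMXON region for the probe below. */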
                            rc = RTR0MemObjAllocCont(&pScratchMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
                            if (RT_FAILURE(rc))
                                return rc;

                            pvScratchPage    = RTR0MemObjAddress(pScratchMemObj);
                            pScratchPagePhys = RTR0MemObjGetPagePhysAddr(pScratchMemObj, 0);
                            memset(pvScratchPage, 0, PAGE_SIZE);

                            /* Set revision dword at the beginning of the structure. */
                            *(uint32_t *)pvScratchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(HWACCMR0Globals.vmx.msr.vmx_basic_info);

                            /* Make sure we don't get rescheduled to another cpu during this probe. */
                            RTCCUINTREG fFlags = ASMIntDisableFlags();

                            /*
                             * Check CR4.VMXE
                             */
                            if (!(HWACCMR0Globals.vmx.hostCR4 & X86_CR4_VMXE))
                            {
                                /* In theory this bit could be cleared behind our back. Which would cause #UD faults when we
                                 * try to execute the VMX instructions...
                                 */
                                ASMSetCR4(HWACCMR0Globals.vmx.hostCR4 | X86_CR4_VMXE);
                            }

                            /* Enter VMX Root Mode. */
                            rc = VMXEnable(pScratchPagePhys);
                            if (RT_FAILURE(rc))
                            {
                                /* KVM leaves the CPU in VMX root mode. Not only is this not allowed, it will crash the host when we enter raw mode, because
                                 * (a) clearing X86_CR4_VMXE in CR4 causes a #GP (we no longer modify this bit)
                                 * (b) turning off paging causes a #GP (unavoidable when switching from long to 32 bits mode or 32 bits to PAE)
                                 *
                                 * They should fix their code, but until they do we simply refuse to run.
                                 */
                                HWACCMR0Globals.lLastError = VERR_VMX_IN_VMX_ROOT_MODE;
                            }
                            else
                            {
                                HWACCMR0Globals.vmx.fSupported = true;
                                VMXDisable();
                            }

                            /* Restore CR4 again; don't leave the X86_CR4_VMXE flag set if it wasn't so before (some software could incorrectly think it's in VMX mode). */
                            ASMSetCR4(HWACCMR0Globals.vmx.hostCR4);
                            ASMSetFlags(fFlags);

                            RTR0MemObjFree(pScratchMemObj, false);
                            if (RT_FAILURE(HWACCMR0Globals.lLastError))
                                return HWACCMR0Globals.lLastError;
                        }
                    }
                    else
                    {
                        AssertFailed(); /* can't hit this case anymore */
                        HWACCMR0Globals.lLastError = VERR_VMX_ILLEGAL_FEATURE_CONTROL_MSR;
                    }
                }
#ifdef LOG_ENABLED
                else
                    SUPR0Printf("HWACCMR0InitCPU failed with rc=%d\n", HWACCMR0Globals.lLastError);
#endif
            }
            else
                HWACCMR0Globals.lLastError = VERR_VMX_NO_VMX;
        }
        else if (   u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX
                 && u32VendorECX == X86_CPUID_VENDOR_AMD_ECX
                 && u32VendorEDX == X86_CPUID_VENDOR_AMD_EDX
                )
        {
            /*
             * Read all SVM MSRs if SVM is available. (same goes for RDMSR/WRMSR)
             * We also assume all SVM-enabled CPUs support fxsave/fxrstor.
             */
            if (    (HWACCMR0Globals.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
                 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
                 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
               )
            {
                int     aRc[RTCPUSET_MAX_CPUS];
                RTCPUID idCpu = 0;

                fAMDVPresent = true;

                /* Query AMD features. */
                ASMCpuId(0x8000000A, &HWACCMR0Globals.svm.u32Rev, &HWACCMR0Globals.uMaxASID, &u32Dummy, &HWACCMR0Globals.svm.u32Features);

                /* We need to check if AMD-V has been properly initialized on all CPUs. Some BIOSes might do a poor job. */
                memset(aRc, 0, sizeof(aRc));
                rc = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);
                AssertRC(rc);

                /* Check the return code of all invocations. */
                if (RT_SUCCESS(rc))
                    rc = hwaccmR0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);

#ifndef DEBUG_bird
                AssertMsg(rc == VINF_SUCCESS || rc == VERR_SVM_IN_USE, ("HWACCMR0InitCPU failed for cpu %d with rc=%d\n", idCpu, rc));
#endif
                if (RT_SUCCESS(rc))
                {
                    /* Read the HWCR msr for diagnostics. */
                    HWACCMR0Globals.svm.msrHWCR    = ASMRdMsr(MSR_K8_HWCR);
                    HWACCMR0Globals.svm.fSupported = true;
                }
                else
                    HWACCMR0Globals.lLastError = rc;
            }
            else
                HWACCMR0Globals.lLastError = VERR_SVM_NO_SVM;
        }
        else
            HWACCMR0Globals.lLastError = VERR_HWACCM_UNKNOWN_CPU;
    }
    else
        HWACCMR0Globals.lLastError = VERR_HWACCM_NO_CPUID;

    if (HWACCMR0Globals.vmx.fSupported)
    {
        HWACCMR0Globals.pfnEnterSession     = VMXR0Enter;
        HWACCMR0Globals.pfnLeaveSession     = VMXR0Leave;
        HWACCMR0Globals.pfnSaveHostState    = VMXR0SaveHostState;
        HWACCMR0Globals.pfnLoadGuestState   = VMXR0LoadGuestState;
        HWACCMR0Globals.pfnRunGuestCode     = VMXR0RunGuestCode;
        HWACCMR0Globals.pfnEnableCpu        = VMXR0EnableCpu;
        HWACCMR0Globals.pfnDisableCpu       = VMXR0DisableCpu;
        HWACCMR0Globals.pfnInitVM           = VMXR0InitVM;
        HWACCMR0Globals.pfnTermVM           = VMXR0TermVM;
        HWACCMR0Globals.pfnSetupVM          = VMXR0SetupVM;
    }
    else if (fAMDVPresent)
    {
        HWACCMR0Globals.pfnEnterSession     = SVMR0Enter;
        HWACCMR0Globals.pfnLeaveSession     = SVMR0Leave;
        HWACCMR0Globals.pfnSaveHostState    = SVMR0SaveHostState;
        HWACCMR0Globals.pfnLoadGuestState   = SVMR0LoadGuestState;
        HWACCMR0Globals.pfnRunGuestCode     = SVMR0RunGuestCode;
        HWACCMR0Globals.pfnEnableCpu        = SVMR0EnableCpu;
        HWACCMR0Globals.pfnDisableCpu       = SVMR0DisableCpu;
        HWACCMR0Globals.pfnInitVM           = SVMR0InitVM;
        HWACCMR0Globals.pfnTermVM           = SVMR0TermVM;
        HWACCMR0Globals.pfnSetupVM          = SVMR0SetupVM;
    }

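    /* When the host kernel provides SUPR0EnableVTx it also manages VT-x across
       suspend/resume, so the power callback is only needed when we enable and
       disable VT-x/AMD-V ourselves. */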
    if (!HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
    {
        rc = RTPowerNotificationRegister(hwaccmR0PowerCallback, 0);
        AssertRC(rc);
    }

    return VINF_SUCCESS;
}


/**
 * Checks the error code array filled in for each cpu in the system.
 *
 * @returns VBox status code.
 * @param   paRc        Error code array
 * @param   cErrorCodes Array size
 * @param   pidCpu      Value of the first cpu that set an error (out)
 */
static int hwaccmR0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu)
{
    int rc = VINF_SUCCESS;

    Assert(cErrorCodes == RTCPUSET_MAX_CPUS);

    for (unsigned i = 0; i < cErrorCodes; i++)
    {
        if (RTMpIsCpuOnline(i))
        {
            if (RT_FAILURE(paRc[i]))
            {
                rc      = paRc[i];
                *pidCpu = i;
                break;
            }
        }
    }
    return rc;
}

/**
 * Does global Ring-0 HWACCM termination.
 *
 * @returns VBox status code.
 */
VMMR0DECL(int) HWACCMR0Term(void)
{
    int rc;
    if (    HWACCMR0Globals.vmx.fSupported
        &&  HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
    {
        Assert(HWACCMR0Globals.fGlobalInit);
        rc = SUPR0EnableVTx(false /* fEnable */);
        for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); iCpu++)
        {
            HWACCMR0Globals.aCpuInfo[iCpu].fConfigured = false;
            Assert(HWACCMR0Globals.aCpuInfo[iCpu].pMemObj == NIL_RTR0MEMOBJ);
        }
    }
    else
    {
        Assert(!HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx);
        if (!HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
        {
            rc = RTPowerNotificationDeregister(hwaccmR0PowerCallback, 0);
            AssertRC(rc);
        }
        else
            rc = VINF_SUCCESS;

        /* Only disable VT-x/AMD-V on all CPUs if we enabled it before. */
        if (HWACCMR0Globals.fGlobalInit)
        {
            int aRc[RTCPUSET_MAX_CPUS];

            memset(aRc, 0, sizeof(aRc));
            rc = RTMpOnAll(hwaccmR0DisableCpuCallback, aRc, NULL);
            Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
#ifdef VBOX_STRICT
            for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
                AssertMsgRC(aRc[i], ("hwaccmR0DisableCpuCallback failed for cpu %d with rc=%d\n", i, aRc[i]));
#endif
        }

        /* Free the per-cpu pages used for VT-x and AMD-V. */
        for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
        {
            if (HWACCMR0Globals.aCpuInfo[i].pMemObj != NIL_RTR0MEMOBJ)
            {
                RTR0MemObjFree(HWACCMR0Globals.aCpuInfo[i].pMemObj, false);
                HWACCMR0Globals.aCpuInfo[i].pMemObj = NIL_RTR0MEMOBJ;
            }
        }
    }
    return rc;
}


/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
 * is to be called on the target cpus.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument.
 * @param   pvUser2     The 2nd user argument.
 */
static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    unsigned u32VendorEBX = (uintptr_t)pvUser1;
    int     *paRc         = (int *)pvUser2;
    uint64_t val;

#if defined(LOG_ENABLED) && !defined(DEBUG_bird)
    SUPR0Printf("HWACCMR0InitCPU cpu %d\n", idCpu);
#endif
    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)

    if (u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX)
    {
        val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

        /*
         * Both the LOCK and VMXON bit must be set; otherwise VMXON will generate a #GP.
         * Once the lock bit is set, this MSR can no longer be modified.
         */
        if (    !(val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
            ||  ((val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK)) == MSR_IA32_FEATURE_CONTROL_VMXON) /* Some BIOSes forget to set the locked bit. */
           )
        {
            /* MSR is not yet locked; we can change it ourselves here. */
            ASMWrMsr(MSR_IA32_FEATURE_CONTROL, HWACCMR0Globals.vmx.msr.feature_ctrl | MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK);
            val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
        }
        if (    (val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
            ==  (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
            paRc[idCpu] = VINF_SUCCESS;
        else
            paRc[idCpu] = VERR_VMX_MSR_LOCKED_OR_DISABLED;
    }
    else if (u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX)
    {
        /* Check if SVM is disabled. */
        val = ASMRdMsr(MSR_K8_VM_CR);
        if (!(val & MSR_K8_VM_CR_SVM_DISABLE))
        {
            /* Turn on SVM in the EFER MSR. */
            val = ASMRdMsr(MSR_K6_EFER);
            if (val & MSR_K6_EFER_SVME)
            {
                paRc[idCpu] = VERR_SVM_IN_USE;
            }
            else
            {
                ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);

                /* Paranoia: check that the write actually stuck. */
                val = ASMRdMsr(MSR_K6_EFER);
                if (val & MSR_K6_EFER_SVME)
                {
                    /* Restore previous value. */
                    ASMWrMsr(MSR_K6_EFER, val & ~MSR_K6_EFER_SVME);
                    paRc[idCpu] = VINF_SUCCESS;
                }
                else
                    paRc[idCpu] = VERR_SVM_ILLEGAL_EFER_MSR;
            }
        }
        else
            paRc[idCpu] = VERR_SVM_DISABLED;
    }
    else
        AssertFailed(); /* can't happen */
    return;
}


/**
 * Sets up HWACCM on all cpus.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR0DECL(int) HWACCMR0EnableAllCpus(PVM pVM)
{
    AssertCompile(sizeof(HWACCMR0Globals.enmHwAccmState) == sizeof(uint32_t));

    /* Make sure we don't touch hwaccm after we've disabled hwaccm in preparation of a suspend. */
    if (ASMAtomicReadBool(&HWACCMR0Globals.fSuspended))
        return VERR_HWACCM_SUSPEND_PENDING;

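    /* Only the first caller gets to switch the state from UNINITIALIZED to
       ENABLED and perform the actual setup; concurrent or repeated callers
       see the compare-and-exchange fail and return VINF_SUCCESS below. */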
    if (ASMAtomicCmpXchgU32((volatile uint32_t *)&HWACCMR0Globals.enmHwAccmState, HWACCMSTATE_ENABLED, HWACCMSTATE_UNINITIALIZED))
    {
        int rc;

        HWACCMR0Globals.fGlobalInit = pVM->hwaccm.s.fGlobalInit;

        if (    HWACCMR0Globals.vmx.fSupported
            &&  HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
        {
            rc = SUPR0EnableVTx(true /* fEnable */);
            if (RT_SUCCESS(rc))
            {
                for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); iCpu++)
                {
                    HWACCMR0Globals.aCpuInfo[iCpu].fConfigured = true;
                    Assert(HWACCMR0Globals.aCpuInfo[iCpu].pMemObj == NIL_RTR0MEMOBJ);
                }
                /* If the host provides a VT-x init API, then we'll rely on that for global init. */
                HWACCMR0Globals.fGlobalInit = pVM->hwaccm.s.fGlobalInit = true;
            }
            else
                AssertMsgFailed(("HWACCMR0EnableAllCpus/SUPR0EnableVTx: rc=%Rrc\n", rc));
        }
        else
        {
            int     aRc[RTCPUSET_MAX_CPUS];
            RTCPUID idCpu = 0;

            memset(aRc, 0, sizeof(aRc));

            /* Allocate one page per cpu for the global vt-x and amd-v pages. */
            for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
            {
                Assert(!HWACCMR0Globals.aCpuInfo[i].pMemObj);

                /** @todo this is rather dangerous if cpus can be taken offline; we don't care for now */
                if (RTMpIsCpuOnline(i))
                {
                    rc = RTR0MemObjAllocCont(&HWACCMR0Globals.aCpuInfo[i].pMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
                    AssertRC(rc);
                    if (RT_FAILURE(rc))
                        return rc;

                    void *pvR0 = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[i].pMemObj);
                    Assert(pvR0);
                    ASMMemZeroPage(pvR0);

#if defined(LOG_ENABLED) && !defined(DEBUG_bird)
                    SUPR0Printf("address %x phys %x\n", pvR0, (uint32_t)RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[i].pMemObj, 0));
#endif
                }
            }
            if (HWACCMR0Globals.fGlobalInit)
            {
                /* First time, so initialize each cpu/core. */
                rc = RTMpOnAll(hwaccmR0EnableCpuCallback, (void *)pVM, aRc);

                /* Check the return code of all invocations. */
                if (RT_SUCCESS(rc))
                    rc = hwaccmR0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
                AssertMsgRC(rc, ("HWACCMR0EnableAllCpus failed for cpu %d with rc=%d\n", idCpu, rc));
            }
            else
                rc = VINF_SUCCESS;
        }

        return rc;
    }
    return VINF_SUCCESS;
}

/**
 * Enable VT-x or AMD-V on the current CPU
 *
 * @returns VBox status code.
 * @param   pVM     VM handle (can be 0!)
 * @param   idCpu   The identifier for the CPU the function is called on.
 */
static int hwaccmR0EnableCpu(PVM pVM, RTCPUID idCpu)
{
    void           *pvPageCpu;
    RTHCPHYS        pPageCpuPhys;
    PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];

    Assert(!HWACCMR0Globals.vmx.fSupported || !HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx);
    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
    Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));
    Assert(!pCpu->fConfigured);
    Assert(!HWACCMR0Globals.fGlobalInit || ASMAtomicReadBool(&pCpu->fInUse) == false);

    pCpu->idCpu = idCpu;

    /* Make sure we start with a clean TLB. */
    pCpu->fFlushTLB = true;

    pCpu->uCurrentASID = 0;   /* we'll always increment this the first time (host uses ASID 0) */
    pCpu->cTLBFlushes  = 0;

    /* Should never happen. */
    if (!pCpu->pMemObj)
    {
        AssertFailed();
        return VERR_INTERNAL_ERROR;
    }

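    /* Hand the per-cpu page allocated in HWACCMR0EnableAllCpus to the backend;
       VT-x uses it as the VMXON region, AMD-V as the host state save area. */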
    pvPageCpu    = RTR0MemObjAddress(pCpu->pMemObj);
    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);

    int rc = HWACCMR0Globals.pfnEnableCpu(pCpu, pVM, pvPageCpu, pPageCpuPhys);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
        pCpu->fConfigured = true;

    return rc;
}


/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
 * is to be called on the target cpus.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument.
 * @param   pvUser2     The 2nd user argument.
 */
static DECLCALLBACK(void) hwaccmR0EnableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PVM  pVM  = (PVM)pvUser1; /* can be NULL! */
    int *paRc = (int *)pvUser2;

    if (!HWACCMR0Globals.fGlobalInit)
    {
        paRc[idCpu] = VINF_SUCCESS;
        AssertFailed();
        return;
    }

    paRc[idCpu] = hwaccmR0EnableCpu(pVM, idCpu);
}


/**
 * Disable VT-x or AMD-V on the current CPU
 *
 * @returns VBox status code.
 * @param   idCpu   The identifier for the CPU the function is called on.
 */
static int hwaccmR0DisableCpu(RTCPUID idCpu)
{
    void           *pvPageCpu;
    RTHCPHYS        pPageCpuPhys;
    int             rc;
    PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];

    Assert(!HWACCMR0Globals.vmx.fSupported || !HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx);
    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
    Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));
    Assert(!HWACCMR0Globals.fGlobalInit || ASMAtomicReadBool(&pCpu->fInUse) == false);
    Assert(!pCpu->fConfigured || pCpu->pMemObj);

    if (!pCpu->pMemObj)
        return (pCpu->fConfigured) ? VERR_NO_MEMORY : VINF_SUCCESS /* not initialized. */;

    pvPageCpu    = RTR0MemObjAddress(pCpu->pMemObj);
    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);

    if (pCpu->fConfigured)
    {
        rc = HWACCMR0Globals.pfnDisableCpu(pCpu, pvPageCpu, pPageCpuPhys);
        AssertRC(rc);
        pCpu->fConfigured = false;
    }
    else
        rc = VINF_SUCCESS; /* nothing to do */

    pCpu->uCurrentASID = 0;
    return rc;
}

/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
 * is to be called on the target cpus.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument.
 * @param   pvUser2     The 2nd user argument.
 */
static DECLCALLBACK(void) hwaccmR0DisableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    int *paRc = (int *)pvUser1;

    if (!HWACCMR0Globals.fGlobalInit)
    {
        paRc[idCpu] = VINF_SUCCESS;
        AssertFailed();
        return;
    }

    paRc[idCpu] = hwaccmR0DisableCpu(idCpu);
}

/**
 * Called whenever a system power state change occurs.
 *
 * @param   enmEvent    Power event
 * @param   pvUser      User argument
 */
static DECLCALLBACK(void) hwaccmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvUser)
{
    NOREF(pvUser);
    Assert(!HWACCMR0Globals.vmx.fSupported || !HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx);

#ifdef LOG_ENABLED
    if (enmEvent == RTPOWEREVENT_SUSPEND)
        SUPR0Printf("hwaccmR0PowerCallback RTPOWEREVENT_SUSPEND\n");
    else
        SUPR0Printf("hwaccmR0PowerCallback RTPOWEREVENT_RESUME\n");
#endif

    if (enmEvent == RTPOWEREVENT_SUSPEND)
        ASMAtomicWriteBool(&HWACCMR0Globals.fSuspended, true);

    if (HWACCMR0Globals.enmHwAccmState == HWACCMSTATE_ENABLED)
    {
        int     aRc[RTCPUSET_MAX_CPUS];
        int     rc;
        RTCPUID idCpu;

        memset(aRc, 0, sizeof(aRc));
        if (enmEvent == RTPOWEREVENT_SUSPEND)
        {
            if (HWACCMR0Globals.fGlobalInit)
            {
                /* Turn off VT-x or AMD-V on all CPUs. */
                rc = RTMpOnAll(hwaccmR0DisableCpuCallback, aRc, NULL);
                Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
            }
            /* else nothing to do here for the local init case */
        }
        else
        {
            /* Reinit the CPUs from scratch as the suspend state might have messed with the MSRs. (lousy BIOSes as usual) */
            rc = RTMpOnAll(HWACCMR0InitCPU, (void *)((HWACCMR0Globals.vmx.fSupported) ? X86_CPUID_VENDOR_INTEL_EBX : X86_CPUID_VENDOR_AMD_EBX), aRc);
            Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);

            if (RT_SUCCESS(rc))
                rc = hwaccmR0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
#ifdef LOG_ENABLED
            if (RT_FAILURE(rc))
                SUPR0Printf("hwaccmR0PowerCallback HWACCMR0InitCPU failed with %d\n", rc);
#endif

            if (HWACCMR0Globals.fGlobalInit)
            {
                /* Turn VT-x or AMD-V back on for all CPUs. */
                rc = RTMpOnAll(hwaccmR0EnableCpuCallback, NULL, aRc);
                Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
            }
            /* else nothing to do here for the local init case */
        }
    }
    if (enmEvent == RTPOWEREVENT_RESUME)
        ASMAtomicWriteBool(&HWACCMR0Globals.fSuspended, false);
}


/**
 * Does Ring-0 per VM HWACCM initialization.
 *
 * This is mainly to check that the Host CPU mode is compatible
 * with VMX.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR0DECL(int) HWACCMR0InitVM(PVM pVM)
{
    int rc;

    AssertReturn(pVM, VERR_INVALID_PARAMETER);

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0InitVM: %p\n", pVM);
#endif

    /* Make sure we don't touch hwaccm after we've disabled hwaccm in preparation of a suspend. */
    if (ASMAtomicReadBool(&HWACCMR0Globals.fSuspended))
        return VERR_HWACCM_SUSPEND_PENDING;

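    /* Copy the capabilities detected at module init into the VM structure so
       ring-3 and the VT-x/AMD-V backends can inspect them. */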
    pVM->hwaccm.s.vmx.fSupported            = HWACCMR0Globals.vmx.fSupported;
    pVM->hwaccm.s.svm.fSupported            = HWACCMR0Globals.svm.fSupported;

    pVM->hwaccm.s.vmx.msr.feature_ctrl      = HWACCMR0Globals.vmx.msr.feature_ctrl;
    pVM->hwaccm.s.vmx.hostCR4               = HWACCMR0Globals.vmx.hostCR4;
    pVM->hwaccm.s.vmx.hostEFER              = HWACCMR0Globals.vmx.hostEFER;
    pVM->hwaccm.s.vmx.msr.vmx_basic_info    = HWACCMR0Globals.vmx.msr.vmx_basic_info;
    pVM->hwaccm.s.vmx.msr.vmx_pin_ctls      = HWACCMR0Globals.vmx.msr.vmx_pin_ctls;
    pVM->hwaccm.s.vmx.msr.vmx_proc_ctls     = HWACCMR0Globals.vmx.msr.vmx_proc_ctls;
    pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2    = HWACCMR0Globals.vmx.msr.vmx_proc_ctls2;
    pVM->hwaccm.s.vmx.msr.vmx_exit          = HWACCMR0Globals.vmx.msr.vmx_exit;
    pVM->hwaccm.s.vmx.msr.vmx_entry         = HWACCMR0Globals.vmx.msr.vmx_entry;
    pVM->hwaccm.s.vmx.msr.vmx_misc          = HWACCMR0Globals.vmx.msr.vmx_misc;
    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0    = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0;
    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1    = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1;
    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0    = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0;
    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1    = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1;
    pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum     = HWACCMR0Globals.vmx.msr.vmx_vmcs_enum;
    pVM->hwaccm.s.vmx.msr.vmx_eptcaps       = HWACCMR0Globals.vmx.msr.vmx_eptcaps;
    pVM->hwaccm.s.svm.msrHWCR               = HWACCMR0Globals.svm.msrHWCR;
    pVM->hwaccm.s.svm.u32Rev                = HWACCMR0Globals.svm.u32Rev;
    pVM->hwaccm.s.svm.u32Features           = HWACCMR0Globals.svm.u32Features;
    pVM->hwaccm.s.cpuid.u32AMDFeatureECX    = HWACCMR0Globals.cpuid.u32AMDFeatureECX;
    pVM->hwaccm.s.cpuid.u32AMDFeatureEDX    = HWACCMR0Globals.cpuid.u32AMDFeatureEDX;
    pVM->hwaccm.s.lLastError                = HWACCMR0Globals.lLastError;

    pVM->hwaccm.s.uMaxASID                  = HWACCMR0Globals.uMaxASID;


    if (!pVM->hwaccm.s.cMaxResumeLoops) /* allow ring-3 overrides */
    {
        pVM->hwaccm.s.cMaxResumeLoops = 1024;
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
        if (RTThreadPreemptIsPendingTrusty())
            pVM->hwaccm.s.cMaxResumeLoops = 8192;
#endif
    }

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        pVCpu->hwaccm.s.idEnteredCpu = NIL_RTCPUID;

        /* Invalidate the last cpu we were running on. */
        pVCpu->hwaccm.s.idLastCpu = NIL_RTCPUID;

        /* We'll always increment this the first time (host uses ASID 0). */
        pVCpu->hwaccm.s.uCurrentASID = 0;
    }

    RTCCUINTREG fFlags = ASMIntDisableFlags();
    PHWACCM_CPUINFO pCpu = HWACCMR0GetCurrentCpu();

    /* Note: Not correct as we can be rescheduled to a different cpu, but the fInUse case is mostly for debugging. */
    ASMAtomicWriteBool(&pCpu->fInUse, true);
    ASMSetFlags(fFlags);

    /* Init a VT-x or AMD-V VM. */
    rc = HWACCMR0Globals.pfnInitVM(pVM);

    ASMAtomicWriteBool(&pCpu->fInUse, false);
    return rc;
}


/**
 * Does Ring-0 per VM HWACCM termination.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR0DECL(int) HWACCMR0TermVM(PVM pVM)
{
    int rc;

    AssertReturn(pVM, VERR_INVALID_PARAMETER);

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0TermVM: %p\n", pVM);
#endif

    /* Make sure we don't touch hwaccm after we've disabled hwaccm in preparation of a suspend. */
    AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);

    /* Note: Not correct as we can be rescheduled to a different cpu, but the fInUse case is mostly for debugging. */
    RTCCUINTREG fFlags = ASMIntDisableFlags();
    PHWACCM_CPUINFO pCpu = HWACCMR0GetCurrentCpu();

    ASMAtomicWriteBool(&pCpu->fInUse, true);
    ASMSetFlags(fFlags);

    /* Terminate a VT-x or AMD-V VM. */
    rc = HWACCMR0Globals.pfnTermVM(pVM);

    ASMAtomicWriteBool(&pCpu->fInUse, false);
    return rc;
}


/**
 * Sets up a VT-x or AMD-V session.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR0DECL(int) HWACCMR0SetupVM(PVM pVM)
{
    int             rc;
    RTCPUID         idCpu = RTMpCpuId();
    PHWACCM_CPUINFO pCpu  = &HWACCMR0Globals.aCpuInfo[idCpu];

    AssertReturn(pVM, VERR_INVALID_PARAMETER);

    /* Make sure we don't touch hwaccm after we've disabled hwaccm in preparation of a suspend. */
    AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0SetupVM: %p\n", pVM);
#endif

    ASMAtomicWriteBool(&pCpu->fInUse, true);

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        /* On first entry we'll sync everything. */
        pVM->aCpus[i].hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
    }

    /* Enable VT-x or AMD-V if local init is required. */
    if (!HWACCMR0Globals.fGlobalInit)
    {
        rc = hwaccmR0EnableCpu(pVM, idCpu);
        AssertRCReturn(rc, rc);
    }

    /* Setup VT-x or AMD-V. */
    rc = HWACCMR0Globals.pfnSetupVM(pVM);

    /* Disable VT-x or AMD-V if local init was done before. */
    if (!HWACCMR0Globals.fGlobalInit)
    {
        rc = hwaccmR0DisableCpu(idCpu);
        AssertRC(rc);
    }

    ASMAtomicWriteBool(&pCpu->fInUse, false);

    return rc;
}


/**
 * Enters the VT-x or AMD-V session.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   pVCpu   The VMCPU to operate on.
 */
VMMR0DECL(int) HWACCMR0Enter(PVM pVM, PVMCPU pVCpu)
{
    PCPUMCTX        pCtx;
    int             rc;
    RTCPUID         idCpu = RTMpCpuId();
    PHWACCM_CPUINFO pCpu  = &HWACCMR0Globals.aCpuInfo[idCpu];

    /* Make sure we can't enter a session after we've disabled hwaccm in preparation of a suspend. */
    AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
    ASMAtomicWriteBool(&pCpu->fInUse, true);

    AssertMsg(pVCpu->hwaccm.s.idEnteredCpu == NIL_RTCPUID, ("%d", (int)pVCpu->hwaccm.s.idEnteredCpu));
    pVCpu->hwaccm.s.idEnteredCpu = idCpu;

    pCtx = CPUMQueryGuestCtxPtr(pVCpu);

    /* Always load the guest's FPU/XMM state on-demand. */
    CPUMDeactivateGuestFPUState(pVCpu);

    /* Always load the guest's debug state on-demand. */
    CPUMDeactivateGuestDebugState(pVCpu);

    /* Always reload the host context and the guest's CR0 register. (!!!!) */
    pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_HOST_CONTEXT;

    /* Setup the register and mask according to the current execution mode. */
    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
        pVM->hwaccm.s.u64RegisterMask = UINT64_C(0xFFFFFFFFFFFFFFFF);
    else
        pVM->hwaccm.s.u64RegisterMask = UINT64_C(0xFFFFFFFF);

    /* Enable VT-x or AMD-V if local init is required. */
    if (!HWACCMR0Globals.fGlobalInit)
    {
        rc = hwaccmR0EnableCpu(pVM, idCpu);
        AssertRCReturn(rc, rc);
    }

#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    bool fStartedSet = PGMDynMapStartOrMigrateAutoSet(pVCpu);
#endif

    rc = HWACCMR0Globals.pfnEnterSession(pVM, pVCpu, pCpu);
    AssertRC(rc);
    /* We must save the host context here (VT-x) as we might be rescheduled on a different cpu after a long jump back to ring 3. */
    rc |= HWACCMR0Globals.pfnSaveHostState(pVM, pVCpu);
    AssertRC(rc);
    rc |= HWACCMR0Globals.pfnLoadGuestState(pVM, pVCpu, pCtx);
    AssertRC(rc);

#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    if (fStartedSet)
        PGMDynMapReleaseAutoSet(pVCpu);
#endif

    /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness and ring-3 calls. */
    if (RT_FAILURE(rc))
        pVCpu->hwaccm.s.idEnteredCpu = NIL_RTCPUID;
    return rc;
}


/**
 * Leaves the VT-x or AMD-V session.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   pVCpu   The VMCPU to operate on.
 */
VMMR0DECL(int) HWACCMR0Leave(PVM pVM, PVMCPU pVCpu)
{
    PCPUMCTX        pCtx;
    int             rc;
    RTCPUID         idCpu = RTMpCpuId();
    PHWACCM_CPUINFO pCpu  = &HWACCMR0Globals.aCpuInfo[idCpu];

    AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);

    pCtx = CPUMQueryGuestCtxPtr(pVCpu);

    /* Note: It's rather tricky with longjmps done by e.g. Log statements or the page fault handler.
     *       We must restore the host FPU here to make absolutely sure we don't leave the guest FPU state active
     *       or trash somebody else's FPU state.
     */
    /* Save the guest FPU and XMM state if necessary. */
    if (CPUMIsGuestFPUStateActive(pVCpu))
    {
        Log2(("CPUMR0SaveGuestFPU\n"));
        CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);

        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
        Assert(!CPUMIsGuestFPUStateActive(pVCpu));
    }

    rc = HWACCMR0Globals.pfnLeaveSession(pVM, pVCpu, pCtx);

    /* We don't pass on invlpg information to the recompiler for nested paging guests, so we must make sure the recompiler flushes its TLB
     * the next time it executes code.
     */
    if (    pVM->hwaccm.s.fNestedPaging
        &&  CPUMIsGuestInPagedProtectedModeEx(pCtx))
    {
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
    }

    /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness and ring-3 calls. */
#ifdef RT_STRICT
    if (RT_UNLIKELY(   pVCpu->hwaccm.s.idEnteredCpu != idCpu
                    && RT_FAILURE(rc)))
    {
        AssertMsgFailed(("Owner is %d, I'm %d", (int)pVCpu->hwaccm.s.idEnteredCpu, (int)idCpu));
        rc = VERR_INTERNAL_ERROR;
    }
#endif
    pVCpu->hwaccm.s.idEnteredCpu = NIL_RTCPUID;

    /* Disable VT-x or AMD-V if local init was done before. */
    if (!HWACCMR0Globals.fGlobalInit)
    {
        rc = hwaccmR0DisableCpu(idCpu);
        AssertRC(rc);

        /* Reset these to force a TLB flush for the next entry. (-> EXPENSIVE) */
        pVCpu->hwaccm.s.idLastCpu    = NIL_RTCPUID;
        pVCpu->hwaccm.s.uCurrentASID = 0;
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    }

    ASMAtomicWriteBool(&pCpu->fInUse, false);
    return rc;
}

/**
 * Runs guest code in a hardware accelerated VM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   pVCpu   The VMCPU to operate on.
 */
VMMR0DECL(int) HWACCMR0RunGuestCode(PVM pVM, PVMCPU pVCpu)
{
    CPUMCTX *pCtx;
    int      rc;
#ifdef VBOX_STRICT
    RTCPUID  idCpu = RTMpCpuId(); NOREF(idCpu);
    PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
#endif

    Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));
    Assert(HWACCMR0Globals.aCpuInfo[idCpu].fConfigured);
    AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
    Assert(ASMAtomicReadBool(&pCpu->fInUse) == true);

#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    PGMDynMapStartAutoSet(pVCpu);
#endif

    pCtx = CPUMQueryGuestCtxPtr(pVCpu);

    rc = HWACCMR0Globals.pfnRunGuestCode(pVM, pVCpu, pCtx);

#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    PGMDynMapReleaseAutoSet(pVCpu);
#endif
    return rc;
}


#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
/**
 * Save guest FPU/XMM state (64 bits guest mode & 32 bits host only)
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   pCtx        CPU context
 */
VMMR0DECL(int) HWACCMR0SaveFPUState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    if (pVM->hwaccm.s.vmx.fSupported)
        return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestFPU64, 0, NULL);

    return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestFPU64, 0, NULL);
}

/**
 * Save guest debug state (64 bits guest mode & 32 bits host only)
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   pCtx        CPU context
 */
VMMR0DECL(int) HWACCMR0SaveDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    if (pVM->hwaccm.s.vmx.fSupported)
        return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestDebug64, 0, NULL);

    return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestDebug64, 0, NULL);
}

/**
 * Test the 32->64 bits switcher
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 */
VMMR0DECL(int) HWACCMR0TestSwitcher3264(PVM pVM)
{
    PVMCPU   pVCpu = &pVM->aCpus[0];
    CPUMCTX *pCtx;
    uint32_t aParam[5] = {0, 1, 2, 3, 4};
    int      rc;

    pCtx = CPUMQueryGuestCtxPtr(pVCpu);

    STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
    if (pVM->hwaccm.s.vmx.fSupported)
        rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnTest64, 5, &aParam[0]);
    else
        rc = SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnTest64, 5, &aParam[0]);
    STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
    return rc;
}

#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */

/**
 * Returns suspend status of the host.
 *
 * @returns Suspend pending or not
 */
VMMR0DECL(bool) HWACCMR0SuspendPending()
{
    return ASMAtomicReadBool(&HWACCMR0Globals.fSuspended);
}

/**
 * Returns the cpu structure for the current cpu.
 * Keep in mind that there is no guarantee it will stay the same (long jumps to ring 3!!!).
 *
 * @returns cpu structure pointer
 */
VMMR0DECL(PHWACCM_CPUINFO) HWACCMR0GetCurrentCpu()
{
    RTCPUID idCpu = RTMpCpuId();

    return &HWACCMR0Globals.aCpuInfo[idCpu];
}

/**
 * Returns the cpu structure for the given cpu.
 * Keep in mind that there is no guarantee it will stay the same (long jumps to ring 3!!!).
 *
 * @returns cpu structure pointer
 * @param   idCpu       id of the cpu
 */
VMMR0DECL(PHWACCM_CPUINFO) HWACCMR0GetCurrentCpuEx(RTCPUID idCpu)
{
    return &HWACCMR0Globals.aCpuInfo[idCpu];
}

/**
 * Returns the VMCPU of the current EMT thread.
 *
 * @returns The VMCPU pointer, or NULL if no VCPU has entered this host cpu.
 * @param   pVM         The VM to operate on.
 */
VMMR0DECL(PVMCPU) HWACCMR0GetVMCPU(PVM pVM)
{
    /* RTMpCpuId had better be cheap. */
    RTCPUID idHostCpu = RTMpCpuId();

    /** @todo optimize for large number of VCPUs when that becomes more common. */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        if (pVCpu->hwaccm.s.idEnteredCpu == idHostCpu)
            return pVCpu;
    }
    return NULL;
}

/**
 * Returns the VMCPU id of the current EMT thread.
 *
 * @returns The VMCPU id, or 0 if the current thread is not an EMT of this VM
 *          (indistinguishable from VCPU 0).
 * @param   pVM         The VM to operate on.
 */
VMMR0DECL(VMCPUID) HWACCMR0GetVMCPUId(PVM pVM)
{
    PVMCPU pVCpu = HWACCMR0GetVMCPU(pVM);
    if (pVCpu)
        return pVCpu->idCpu;

    return 0;
}

/**
 * Save a pending IO read.
 *
 * @param   pVCpu           The VMCPU to operate on.
 * @param   GCPtrRip        Address of the IO instruction.
 * @param   GCPtrRipNext    Address of the next instruction.
 * @param   uPort           Port address.
 * @param   uAndVal         AND mask for saving the result in eax.
 * @param   cbSize          Read size.
 */
VMMR0DECL(void) HWACCMR0SavePendingIOPortRead(PVMCPU pVCpu, RTGCPTR GCPtrRip, RTGCPTR GCPtrRipNext, unsigned uPort, unsigned uAndVal, unsigned cbSize)
{
    pVCpu->hwaccm.s.PendingIO.enmType        = HWACCMPENDINGIO_PORT_READ;
    pVCpu->hwaccm.s.PendingIO.GCPtrRip       = GCPtrRip;
    pVCpu->hwaccm.s.PendingIO.GCPtrRipNext   = GCPtrRipNext;
    pVCpu->hwaccm.s.PendingIO.s.Port.uPort   = uPort;
    pVCpu->hwaccm.s.PendingIO.s.Port.uAndVal = uAndVal;
    pVCpu->hwaccm.s.PendingIO.s.Port.cbSize  = cbSize;
}

/**
 * Save a pending IO write.
 *
 * @param   pVCpu           The VMCPU to operate on.
 * @param   GCPtrRip        Address of the IO instruction.
 * @param   GCPtrRipNext    Address of the next instruction.
 * @param   uPort           Port address.
 * @param   uAndVal         AND mask for fetching the value to write from eax.
 * @param   cbSize          Write size.
 */
VMMR0DECL(void) HWACCMR0SavePendingIOPortWrite(PVMCPU pVCpu, RTGCPTR GCPtrRip, RTGCPTR GCPtrRipNext, unsigned uPort, unsigned uAndVal, unsigned cbSize)
{
    pVCpu->hwaccm.s.PendingIO.enmType        = HWACCMPENDINGIO_PORT_WRITE;
    pVCpu->hwaccm.s.PendingIO.GCPtrRip       = GCPtrRip;
    pVCpu->hwaccm.s.PendingIO.GCPtrRipNext   = GCPtrRipNext;
    pVCpu->hwaccm.s.PendingIO.s.Port.uPort   = uPort;
    pVCpu->hwaccm.s.PendingIO.s.Port.uAndVal = uAndVal;
    pVCpu->hwaccm.s.PendingIO.s.Port.cbSize  = cbSize;
}
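
/* Illustrative only: a minimal sketch of how an IO-exit handler might record a
   one-byte "IN AL, 60h" for completion in ring 3. The rip advance (2 bytes for
   opcode + imm8), the port, the mask and the size are assumed example values:

        HWACCMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pCtx->rip + 2,
                                      0x60, 0xff, 1);
*/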

/**
 * Disable VT-x if it's active *and* the current switcher turns off paging.
 *
 * @returns VBox status code.
 * @param   pVM             VM handle.
 * @param   pfVTxDisabled   Where to store whether VT-x was disabled (out).
 */
VMMR0DECL(int) HWACCMR0EnterSwitcher(PVM pVM, bool *pfVTxDisabled)
{
    Assert(!(ASMGetFlags() & X86_EFL_IF));

    *pfVTxDisabled = false;

    if (    HWACCMR0Globals.enmHwAccmState != HWACCMSTATE_ENABLED
        || !HWACCMR0Globals.vmx.fSupported /* no such issues with AMD-V */
        || !HWACCMR0Globals.fGlobalInit    /* Local init implies the CPU is currently not in VMX root mode. */)
        return VINF_SUCCESS;    /* nothing to do */

    switch (VMMGetSwitcher(pVM))
    {
        case VMMSWITCHER_32_TO_32:
        case VMMSWITCHER_PAE_TO_PAE:
            return VINF_SUCCESS;    /* safe switchers as they don't turn off paging */

        case VMMSWITCHER_32_TO_PAE:
        case VMMSWITCHER_PAE_TO_32:     /* is this one actually used?? */
        case VMMSWITCHER_AMD64_TO_32:
        case VMMSWITCHER_AMD64_TO_PAE:
            break;                      /* unsafe switchers */

        default:
            AssertFailed();
            return VERR_INTERNAL_ERROR;
    }

    PHWACCM_CPUINFO pCpu = HWACCMR0GetCurrentCpu();
    void           *pvPageCpu;
    RTHCPHYS        pPageCpuPhys;

    AssertReturn(pCpu && pCpu->pMemObj, VERR_INTERNAL_ERROR);
    pvPageCpu    = RTR0MemObjAddress(pCpu->pMemObj);
    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);

    *pfVTxDisabled = true;
    return VMXR0DisableCpu(pCpu, pvPageCpu, pPageCpuPhys);
}

/**
 * Re-enable VT-x if it was active *and* the current switcher turned off paging.
 *
 * @returns VBox status code.
 * @param   pVM             VM handle.
 * @param   fVTxDisabled    Whether VT-x was disabled by HWACCMR0EnterSwitcher.
 */
VMMR0DECL(int) HWACCMR0LeaveSwitcher(PVM pVM, bool fVTxDisabled)
{
    Assert(!(ASMGetFlags() & X86_EFL_IF));

    if (!fVTxDisabled)
        return VINF_SUCCESS;    /* nothing to do */

    Assert(   HWACCMR0Globals.enmHwAccmState == HWACCMSTATE_ENABLED
           && HWACCMR0Globals.vmx.fSupported
           && HWACCMR0Globals.fGlobalInit);

    PHWACCM_CPUINFO pCpu = HWACCMR0GetCurrentCpu();
    void           *pvPageCpu;
    RTHCPHYS        pPageCpuPhys;

    AssertReturn(pCpu && pCpu->pMemObj, VERR_INTERNAL_ERROR);
    pvPageCpu    = RTR0MemObjAddress(pCpu->pMemObj);
    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);

    return VMXR0EnableCpu(pCpu, pVM, pvPageCpu, pPageCpuPhys);
}
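
/* Illustrative only: the intended bracketing pattern around a raw-mode world
   switcher invocation, with interrupts already disabled by the caller. The
   call site is assumed for the sketch:

        bool fVTxDisabled;
        int rc = HWACCMR0EnterSwitcher(pVM, &fVTxDisabled);
        if (RT_SUCCESS(rc))
        {
            // ... run the switcher, which may turn off paging ...
            rc = HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);
        }
*/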

#ifdef VBOX_STRICT
/**
 * Dumps a descriptor.
 *
 * @param   pDesc       Descriptor to dump.
 * @param   Sel         Selector number.
 * @param   pszMsg      Message to prepend the log entry with.
 */
VMMR0DECL(void) HWACCMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg)
{
    /*
     * Make variable description string.
     */
    static struct
    {
        unsigned    cch;
        const char *psz;
    } const s_aTypes[32] =
    {
# define STRENTRY(str) { sizeof(str) - 1, str }

        /* system */
# if HC_ARCH_BITS == 64
        STRENTRY("Reserved0 "),                  /* 0x00 */
        STRENTRY("Reserved1 "),                  /* 0x01 */
        STRENTRY("LDT "),                        /* 0x02 */
        STRENTRY("Reserved3 "),                  /* 0x03 */
        STRENTRY("Reserved4 "),                  /* 0x04 */
        STRENTRY("Reserved5 "),                  /* 0x05 */
        STRENTRY("Reserved6 "),                  /* 0x06 */
        STRENTRY("Reserved7 "),                  /* 0x07 */
        STRENTRY("Reserved8 "),                  /* 0x08 */
        STRENTRY("TSS64Avail "),                 /* 0x09 */
        STRENTRY("ReservedA "),                  /* 0x0a */
        STRENTRY("TSS64Busy "),                  /* 0x0b */
        STRENTRY("Call64 "),                     /* 0x0c */
        STRENTRY("ReservedD "),                  /* 0x0d */
        STRENTRY("Int64 "),                      /* 0x0e */
        STRENTRY("Trap64 "),                     /* 0x0f */
# else
        STRENTRY("Reserved0 "),                  /* 0x00 */
        STRENTRY("TSS16Avail "),                 /* 0x01 */
        STRENTRY("LDT "),                        /* 0x02 */
        STRENTRY("TSS16Busy "),                  /* 0x03 */
        STRENTRY("Call16 "),                     /* 0x04 */
        STRENTRY("Task "),                       /* 0x05 */
        STRENTRY("Int16 "),                      /* 0x06 */
        STRENTRY("Trap16 "),                     /* 0x07 */
        STRENTRY("Reserved8 "),                  /* 0x08 */
        STRENTRY("TSS32Avail "),                 /* 0x09 */
        STRENTRY("ReservedA "),                  /* 0x0a */
        STRENTRY("TSS32Busy "),                  /* 0x0b */
        STRENTRY("Call32 "),                     /* 0x0c */
        STRENTRY("ReservedD "),                  /* 0x0d */
        STRENTRY("Int32 "),                      /* 0x0e */
        STRENTRY("Trap32 "),                     /* 0x0f */
# endif
        /* non system */
        STRENTRY("DataRO "),                     /* 0x10 */
        STRENTRY("DataRO Accessed "),            /* 0x11 */
        STRENTRY("DataRW "),                     /* 0x12 */
        STRENTRY("DataRW Accessed "),            /* 0x13 */
        STRENTRY("DataDownRO "),                 /* 0x14 */
        STRENTRY("DataDownRO Accessed "),        /* 0x15 */
        STRENTRY("DataDownRW "),                 /* 0x16 */
        STRENTRY("DataDownRW Accessed "),        /* 0x17 */
        STRENTRY("CodeEO "),                     /* 0x18 */
        STRENTRY("CodeEO Accessed "),            /* 0x19 */
        STRENTRY("CodeER "),                     /* 0x1a */
        STRENTRY("CodeER Accessed "),            /* 0x1b */
        STRENTRY("CodeConfEO "),                 /* 0x1c */
        STRENTRY("CodeConfEO Accessed "),        /* 0x1d */
        STRENTRY("CodeConfER "),                 /* 0x1e */
        STRENTRY("CodeConfER Accessed ")         /* 0x1f */
# undef STRENTRY
    };
# define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0)
    char        szMsg[128];
    char       *psz = &szMsg[0];
    unsigned    i = pDesc->Gen.u1DescType << 4 | pDesc->Gen.u4Type;
    memcpy(psz, s_aTypes[i].psz, s_aTypes[i].cch);
    psz += s_aTypes[i].cch;

    if (pDesc->Gen.u1Present)
        ADD_STR(psz, "Present ");
    else
        ADD_STR(psz, "Not-Present ");
# if HC_ARCH_BITS == 64
    if (pDesc->Gen.u1Long)
        ADD_STR(psz, "64-bit ");
    else
        ADD_STR(psz, "Comp ");
# else
    if (pDesc->Gen.u1Granularity)
        ADD_STR(psz, "Page ");
    if (pDesc->Gen.u1DefBig)
        ADD_STR(psz, "32-bit ");
    else
        ADD_STR(psz, "16-bit ");
# endif
# undef ADD_STR
    *psz = '\0';

    /*
     * Limit and Base and format the output.
     */
    uint32_t u32Limit = X86DESC_LIMIT(*pDesc);
    if (pDesc->Gen.u1Granularity)
        u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK;

# if HC_ARCH_BITS == 64
    uint64_t u64Base = X86DESC64_BASE(*pDesc);

    Log(("%s %04x - %RX64 %RX64 - base=%RX64 limit=%08x dpl=%d %s\n", pszMsg,
         Sel, pDesc->au64[0], pDesc->au64[1], u64Base, u32Limit, pDesc->Gen.u2Dpl, szMsg));
# else
    uint32_t u32Base = X86DESC_BASE(*pDesc);

    Log(("%s %04x - %08x %08x - base=%08x limit=%08x dpl=%d %s\n", pszMsg,
         Sel, pDesc->au32[0], pDesc->au32[1], u32Base, u32Limit, pDesc->Gen.u2Dpl, szMsg));
# endif
}
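
/* Illustrative only: dumping the descriptor a GDT selector refers to,
   mirroring how the VT-x/AMD-V modules call this helper. Sel is assumed to be
   a GDT selector already in scope:

        RTGDTR Gdtr;
        ASMGetGDTR(&Gdtr);
        PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (Sel & X86_SEL_MASK));
        HWACCMR0DumpDescriptor(pDesc, Sel, "CS: ");
*/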

/**
 * Formats a full register dump.
 *
 * @param   pVM         The VM to operate on.
 * @param   pVCpu       The VMCPU to operate on.
 * @param   pCtx        The context to format.
 */
VMMR0DECL(void) HWACCMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    /*
     * Format the flags.
     */
    static struct
    {
        const char *pszSet; const char *pszClear; uint32_t fFlag;
    } aFlags[] =
    {
        { "vip", NULL, X86_EFL_VIP },
        { "vif", NULL, X86_EFL_VIF },
        { "ac",  NULL, X86_EFL_AC },
        { "vm",  NULL, X86_EFL_VM },
        { "rf",  NULL, X86_EFL_RF },
        { "nt",  NULL, X86_EFL_NT },
        { "ov",  "nv", X86_EFL_OF },
        { "dn",  "up", X86_EFL_DF },
        { "ei",  "di", X86_EFL_IF },
        { "tf",  NULL, X86_EFL_TF },
        { "ng",  "pl", X86_EFL_SF },
        { "nz",  "zr", X86_EFL_ZF },
        { "ac",  "na", X86_EFL_AF },
        { "po",  "pe", X86_EFL_PF },
        { "cy",  "nc", X86_EFL_CF },
    };
    char szEFlags[80];
    char *psz = szEFlags;
    uint32_t efl = pCtx->eflags.u32;
    for (unsigned i = 0; i < RT_ELEMENTS(aFlags); i++)
    {
        const char *pszAdd = aFlags[i].fFlag & efl ? aFlags[i].pszSet : aFlags[i].pszClear;
        if (pszAdd)
        {
            strcpy(psz, pszAdd);
            psz += strlen(pszAdd);
            *psz++ = ' ';
        }
    }
    psz[-1] = '\0';     /* drop the trailing space */

    /*
     * Format the registers.
     */
    if (CPUMIsGuestIn64BitCode(pVCpu, CPUMCTX2CORE(pCtx)))
    {
        Log(("rax=%016RX64 rbx=%016RX64 rcx=%016RX64 rdx=%016RX64\n"
             "rsi=%016RX64 rdi=%016RX64 r8 =%016RX64 r9 =%016RX64\n"
             "r10=%016RX64 r11=%016RX64 r12=%016RX64 r13=%016RX64\n"
             "r14=%016RX64 r15=%016RX64\n"
             "rip=%016RX64 rsp=%016RX64 rbp=%016RX64 iopl=%d %*s\n"
             "cs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
             "ds={%04x base=%016RX64 limit=%08x flags=%08x}\n"
             "es={%04x base=%016RX64 limit=%08x flags=%08x}\n"
             "fs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
             "gs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
             "ss={%04x base=%016RX64 limit=%08x flags=%08x}\n"
             "cr0=%016RX64 cr2=%016RX64 cr3=%016RX64 cr4=%016RX64\n"
             "dr0=%016RX64 dr1=%016RX64 dr2=%016RX64 dr3=%016RX64\n"
             "dr4=%016RX64 dr5=%016RX64 dr6=%016RX64 dr7=%016RX64\n"
             "gdtr=%016RX64:%04x idtr=%016RX64:%04x eflags=%08x\n"
             "ldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
             "tr ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
             "SysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
             ,
             pCtx->rax, pCtx->rbx, pCtx->rcx, pCtx->rdx, pCtx->rsi, pCtx->rdi,
             pCtx->r8, pCtx->r9, pCtx->r10, pCtx->r11, pCtx->r12, pCtx->r13,
             pCtx->r14, pCtx->r15,
             pCtx->rip, pCtx->rsp, pCtx->rbp, X86_EFL_GET_IOPL(efl), 31, szEFlags,
             (RTSEL)pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u,
             (RTSEL)pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u,
             (RTSEL)pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u,
             (RTSEL)pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u,
             (RTSEL)pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u,
             (RTSEL)pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u,
             pCtx->cr0, pCtx->cr2, pCtx->cr3, pCtx->cr4,
             pCtx->dr[0], pCtx->dr[1], pCtx->dr[2], pCtx->dr[3],
             pCtx->dr[4], pCtx->dr[5], pCtx->dr[6], pCtx->dr[7],
             pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, efl,
             (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u64Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
             (RTSEL)pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp));
    }
    else
        Log(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
             "eip=%08x esp=%08x ebp=%08x iopl=%d %*s\n"
             "cs={%04x base=%016RX64 limit=%08x flags=%08x} dr0=%08RX64 dr1=%08RX64\n"
             "ds={%04x base=%016RX64 limit=%08x flags=%08x} dr2=%08RX64 dr3=%08RX64\n"
             "es={%04x base=%016RX64 limit=%08x flags=%08x} dr4=%08RX64 dr5=%08RX64\n"
             "fs={%04x base=%016RX64 limit=%08x flags=%08x} dr6=%08RX64 dr7=%08RX64\n"
             "gs={%04x base=%016RX64 limit=%08x flags=%08x} cr0=%08RX64 cr2=%08RX64\n"
             "ss={%04x base=%016RX64 limit=%08x flags=%08x} cr3=%08RX64 cr4=%08RX64\n"
             "gdtr=%016RX64:%04x idtr=%016RX64:%04x eflags=%08x\n"
             "ldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
             "tr ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
             "SysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
             ,
             pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
             pCtx->eip, pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), 31, szEFlags,
             (RTSEL)pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u, pCtx->dr[0], pCtx->dr[1],
             (RTSEL)pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u, pCtx->dr[2], pCtx->dr[3],
             (RTSEL)pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u, pCtx->dr[4], pCtx->dr[5],
             (RTSEL)pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u, pCtx->dr[6], pCtx->dr[7],
             (RTSEL)pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u, pCtx->cr0, pCtx->cr2,
             (RTSEL)pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u, pCtx->cr3, pCtx->cr4,
             pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, efl,
             (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u64Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
             (RTSEL)pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp));

    Log(("FPU:\n"
         "FCW=%04x FSW=%04x FTW=%02x\n"
         "FOP=%04x FPUIP=%08x CS=%04x Rsrvd1=%04x\n"
         "FPUDP=%04x DS=%04x Rsrvd2=%04x MXCSR=%08x MXCSR_MASK=%08x\n"
         ,
         pCtx->fpu.FCW, pCtx->fpu.FSW, pCtx->fpu.FTW,
         pCtx->fpu.FOP, pCtx->fpu.FPUIP, pCtx->fpu.CS, pCtx->fpu.Rsrvd1,
         pCtx->fpu.FPUDP, pCtx->fpu.DS, pCtx->fpu.Rsrvd2,
         pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK));

    Log(("MSR:\n"
         "EFER         =%016RX64\n"
         "PAT          =%016RX64\n"
         "STAR         =%016RX64\n"
         "CSTAR        =%016RX64\n"
         "LSTAR        =%016RX64\n"
         "SFMASK       =%016RX64\n"
         "KERNELGSBASE =%016RX64\n",
         pCtx->msrEFER,
         pCtx->msrPAT,
         pCtx->msrSTAR,
         pCtx->msrCSTAR,
         pCtx->msrLSTAR,
         pCtx->msrSFMASK,
         pCtx->msrKERNELGSBASE));
}
#endif /* VBOX_STRICT */

/* Dummy callback handlers (used while neither VT-x nor AMD-V is active, so the
   handler table is always safe to call). */
VMMR0DECL(int) HWACCMR0DummyEnter(PVM pVM, PVMCPU pVCpu, PHWACCM_CPUINFO pCpu)
{
    return VINF_SUCCESS;
}

VMMR0DECL(int) HWACCMR0DummyLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    return VINF_SUCCESS;
}

VMMR0DECL(int) HWACCMR0DummyEnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
{
    return VINF_SUCCESS;
}

VMMR0DECL(int) HWACCMR0DummyDisableCpu(PHWACCM_CPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
{
    return VINF_SUCCESS;
}

VMMR0DECL(int) HWACCMR0DummyInitVM(PVM pVM)
{
    return VINF_SUCCESS;
}

VMMR0DECL(int) HWACCMR0DummyTermVM(PVM pVM)
{
    return VINF_SUCCESS;
}

VMMR0DECL(int) HWACCMR0DummySetupVM(PVM pVM)
{
    return VINF_SUCCESS;
}

VMMR0DECL(int) HWACCMR0DummyRunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    return VINF_SUCCESS;
}

VMMR0DECL(int) HWACCMR0DummySaveHostState(PVM pVM, PVMCPU pVCpu)
{
    return VINF_SUCCESS;
}

VMMR0DECL(int) HWACCMR0DummyLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    return VINF_SUCCESS;
}