VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp@7692

Last change on this file since 7692 was 7574, checked in by vboxsync on 2008-03-26

Commit message: And again

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 35.8 KB
1/* $Id: HWACCMR0.cpp 7574 2008-03-26 12:44:48Z vboxsync $ */
2/** @file
3 * HWACCM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_HWACCM
23#include <VBox/hwaccm.h>
24#include "HWACCMInternal.h"
25#include <VBox/vm.h>
26#include <VBox/x86.h>
27#include <VBox/hwacc_vmx.h>
28#include <VBox/hwacc_svm.h>
29#include <VBox/pgm.h>
30#include <VBox/pdm.h>
31#include <VBox/err.h>
32#include <VBox/log.h>
33#include <VBox/selm.h>
34#include <VBox/iom.h>
35#include <iprt/param.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#include <iprt/string.h>
39#include <iprt/memobj.h>
40#include <iprt/cpuset.h>
41#include "HWVMXR0.h"
42#include "HWSVMR0.h"
43
44/*******************************************************************************
45* Internal Functions *
46*******************************************************************************/
47static DECLCALLBACK(void) HWACCMR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
48static DECLCALLBACK(void) HWACCMR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
49static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
50static int hwaccmr0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu);
51
52/*******************************************************************************
53* Local Variables *
54*******************************************************************************/
55static struct
56{
57 struct
58 {
59 RTR0MEMOBJ pMemObj;
60 bool fVMXConfigured;
61 bool fSVMConfigured;
62 } aCpuInfo[RTCPUSET_MAX_CPUS];
63
64 struct
65 {
66 /** Set by the ring-0 driver to indicate VMX is supported by the CPU. */
67 bool fSupported;
68
69 /** Host CR4 value (set by ring-0 VMX init) */
70 uint64_t hostCR4;
71
72 /** VMX MSR values */
73 struct
74 {
75 uint64_t feature_ctrl;
76 uint64_t vmx_basic_info;
77 uint64_t vmx_pin_ctls;
78 uint64_t vmx_proc_ctls;
79 uint64_t vmx_exit;
80 uint64_t vmx_entry;
81 uint64_t vmx_misc;
82 uint64_t vmx_cr0_fixed0;
83 uint64_t vmx_cr0_fixed1;
84 uint64_t vmx_cr4_fixed0;
85 uint64_t vmx_cr4_fixed1;
86 uint64_t vmx_vmcs_enum;
87 } msr;
88 /* Last instruction error */
89 uint32_t ulLastInstrError;
90 } vmx;
91 struct
92 {
93 /** Set by the ring-0 driver to indicate SVM is supported by the CPU. */
94 bool fSupported;
95
96 /** SVM revision. */
97 uint32_t u32Rev;
98
99 /** Maximum ASID allowed. */
100 uint32_t u32MaxASID;
101 } svm;
102 /** Saved error from detection */
103 int32_t lLastError;
104
105 struct
106 {
107 uint32_t u32AMDFeatureECX;
108 uint32_t u32AMDFeatureEDX;
109 } cpuid;
110
111 HWACCMSTATE enmHwAccmState;
112} HWACCMR0Globals;
113
114
115
116/**
117 * Does global Ring-0 HWACCM initialization.
118 *
119 * @returns VBox status code.
120 */
121HWACCMR0DECL(int) HWACCMR0Init()
122{
123 int rc;
124
125 memset(&HWACCMR0Globals, 0, sizeof(HWACCMR0Globals));
126 HWACCMR0Globals.enmHwAccmState = HWACCMSTATE_UNINITIALIZED;
127
128#ifndef VBOX_WITH_HYBIRD_32BIT_KERNEL /* paranoia */
129
130 /*
131 * Check for VT-x and AMD-V capabilities
132 */
133 if (ASMHasCpuId())
134 {
135 uint32_t u32FeaturesECX;
136 uint32_t u32Dummy;
137 uint32_t u32FeaturesEDX;
138 uint32_t u32VendorEBX, u32VendorECX, u32VendorEDX;
139
140 ASMCpuId(0, &u32Dummy, &u32VendorEBX, &u32VendorECX, &u32VendorEDX);
141 ASMCpuId(1, &u32Dummy, &u32Dummy, &u32FeaturesECX, &u32FeaturesEDX);
142 /* Query AMD features. */
143 ASMCpuId(0x80000001, &u32Dummy, &u32Dummy, &HWACCMR0Globals.cpuid.u32AMDFeatureECX, &HWACCMR0Globals.cpuid.u32AMDFeatureEDX);
144
145 if ( u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX
146 && u32VendorECX == X86_CPUID_VENDOR_INTEL_ECX
147 && u32VendorEDX == X86_CPUID_VENDOR_INTEL_EDX
148 )
149 {
150 /*
151 * Read all VMX MSRs if VMX is available. (same goes for RDMSR/WRMSR)
152 * We also assume all VMX-enabled CPUs support fxsave/fxrstor.
153 */
154 if ( (u32FeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
155 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
156 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
157 )
158 {
159 int aRc[RTCPUSET_MAX_CPUS];
160 RTCPUID idCpu = 0;
161
162 HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
163
164 /* We need to check if VT-x has been properly initialized on all CPUs. Some BIOSes do a lousy job. */
165 memset(aRc, 0, sizeof(aRc));
166 HWACCMR0Globals.lLastError = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);
167
168 /* Check the return code of all invocations. */
169 if (VBOX_SUCCESS(HWACCMR0Globals.lLastError))
170 HWACCMR0Globals.lLastError = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
171
172 if (VBOX_SUCCESS(HWACCMR0Globals.lLastError))
173 {
174 /* Reread in case we've changed it. */
175 HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
176
177 if ( (HWACCMR0Globals.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
178 == (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
179 {
180 HWACCMR0Globals.vmx.fSupported = true;
181 HWACCMR0Globals.vmx.msr.vmx_basic_info = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
182 HWACCMR0Globals.vmx.msr.vmx_pin_ctls = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
183 HWACCMR0Globals.vmx.msr.vmx_proc_ctls = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
184 HWACCMR0Globals.vmx.msr.vmx_exit = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
185 HWACCMR0Globals.vmx.msr.vmx_entry = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
186 HWACCMR0Globals.vmx.msr.vmx_misc = ASMRdMsr(MSR_IA32_VMX_MISC);
187 HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
188 HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
189 HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
190 HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
191 HWACCMR0Globals.vmx.msr.vmx_vmcs_enum = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
192 HWACCMR0Globals.vmx.hostCR4 = ASMGetCR4();
193
194#if HC_ARCH_BITS == 64
195 RTR0MEMOBJ pScatchMemObj;
196 void *pvScatchPage;
197 RTHCPHYS pScatchPagePhys;
198
199 rc = RTR0MemObjAllocCont(&pScatchMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
200 if (RT_FAILURE(rc))
201 return rc;
202
203 pvScatchPage = RTR0MemObjAddress(pScatchMemObj);
204 pScatchPagePhys = RTR0MemObjGetPagePhysAddr(pScatchMemObj, 0);
205 memset(pvScatchPage, 0, PAGE_SIZE);
206
207 /* Set revision dword at the beginning of the structure. */
208 *(uint32_t *)pvScatchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(HWACCMR0Globals.vmx.msr.vmx_basic_info);
209
210 /* Make sure we don't get rescheduled to another cpu during this probe. */
211 RTCCUINTREG fFlags = ASMIntDisableFlags();
212
213 /*
214 * Check CR4.VMXE
215 */
216 if (!(HWACCMR0Globals.vmx.hostCR4 & X86_CR4_VMXE))
217 {
218 /* In theory this bit could be cleared behind our back, which would cause #UD faults when we
219 * try to execute the VMX instructions...
220 */
221 ASMSetCR4(HWACCMR0Globals.vmx.hostCR4 | X86_CR4_VMXE);
222 }
223
224 /* Enter VMX Root Mode */
225 rc = VMXEnable(pScatchPagePhys);
226 if (VBOX_FAILURE(rc))
227 {
228 /* KVM leaves the CPU in VMX root mode. Not only is this not allowed, it will crash the host when we enter raw mode, because
229 * (a) clearing X86_CR4_VMXE in CR4 causes a #GP (we no longer modify this bit)
230 * (b) turning off paging causes a #GP (unavoidable when switching from long to 32 bits mode)
231 *
232 * They should fix their code, but until they do we simply refuse to run.
233 */
234 HWACCMR0Globals.lLastError = VERR_VMX_IN_VMX_ROOT_MODE;
235 HWACCMR0Globals.vmx.fSupported = false;
236 }
237 else
238 VMXDisable();
239
240 /* Restore CR4 again; don't leave the X86_CR4_VMXE flag set if it wasn't so before (some software could incorrectly think it's in VMX mode) */
241 ASMSetCR4(HWACCMR0Globals.vmx.hostCR4);
242 ASMSetFlags(fFlags);
243
244 RTR0MemObjFree(pScatchMemObj, false);
245#endif
246 }
247 else
248 {
249 AssertFailed(); /* can't hit this case anymore */
250 HWACCMR0Globals.lLastError = VERR_VMX_ILLEGAL_FEATURE_CONTROL_MSR;
251 }
252 }
253#ifdef LOG_ENABLED
254 else
255 SUPR0Printf("HWACCMR0InitCPU failed with rc=%d\n", HWACCMR0Globals.lLastError);
256#endif
257 }
258 else
259 HWACCMR0Globals.lLastError = VERR_VMX_NO_VMX;
260 }
261 else
262 if ( u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX
263 && u32VendorECX == X86_CPUID_VENDOR_AMD_ECX
264 && u32VendorEDX == X86_CPUID_VENDOR_AMD_EDX
265 )
266 {
267 /*
268 * Read all SVM MSRs if SVM is available. (same goes for RDMSR/WRMSR)
269 * We also assume all SVM-enabled CPUs support fxsave/fxrstor.
270 */
271 if ( (HWACCMR0Globals.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
272 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
273 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
274 )
275 {
276 int aRc[RTCPUSET_MAX_CPUS];
277 RTCPUID idCpu = 0;
278
279 /* We need to check if AMD-V has been properly initialized on all CPUs. Some BIOSes might do a poor job. */
280 memset(aRc, 0, sizeof(aRc));
281 rc = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);
282 AssertRC(rc);
283
284 /* Check the return code of all invocations. */
285 if (VBOX_SUCCESS(rc))
286 rc = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
287
288 AssertMsg(VBOX_SUCCESS(rc), ("HWACCMR0InitCPU failed for cpu %d with rc=%d\n", idCpu, rc));
289
290 if (VBOX_SUCCESS(rc))
291 {
292 /* Query AMD features. */
293 ASMCpuId(0x8000000A, &HWACCMR0Globals.svm.u32Rev, &HWACCMR0Globals.svm.u32MaxASID, &u32Dummy, &u32Dummy);
294
295 HWACCMR0Globals.svm.fSupported = true;
296 }
297 else
298 HWACCMR0Globals.lLastError = rc;
299 }
300 else
301 HWACCMR0Globals.lLastError = VERR_SVM_NO_SVM;
302 }
303 else
304 HWACCMR0Globals.lLastError = VERR_HWACCM_UNKNOWN_CPU;
305 }
306 else
307 HWACCMR0Globals.lLastError = VERR_HWACCM_NO_CPUID;
308
309#endif /* !VBOX_WITH_HYBIRD_32BIT_KERNEL */
310
311 return VINF_SUCCESS;
312}
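/*
 * Illustrative sketch only (not part of this file): the global init/term pair is
 * expected to bracket everything else done by this module. A hypothetical ring-0
 * driver glue could look roughly like this:
 *
 *     int rc = HWACCMR0Init();            // probe VT-x/AMD-V once at module load
 *     if (VBOX_SUCCESS(rc))
 *     {
 *         // ... create VMs, HWACCMR0EnableAllCpus(), run guest code ...
 *         HWACCMR0Term();                 // disable on all CPUs and free the per-cpu pages
 *     }
 */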
313
314
315/**
316 * Checks the error code array filled in for each cpu in the system.
317 *
318 * @returns VBox status code.
319 * @param paRc Error code array
320 * @param cErrorCodes Array size
321 * @param pidCpu Value of the first cpu that set an error (out)
322 */
323static int hwaccmr0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu)
324{
325 int rc = VINF_SUCCESS;
326
327 Assert(cErrorCodes == RTCPUSET_MAX_CPUS);
328
329 for (unsigned i=0;i<cErrorCodes;i++)
330 {
331 if (RTMpIsCpuOnline(i))
332 {
333 if (VBOX_FAILURE(paRc[i]))
334 {
335 rc = paRc[i];
336 *pidCpu = i;
337 break;
338 }
339 }
340 }
341 return rc;
342}
343
344/**
345 * Does global Ring-0 HWACCM termination.
346 *
347 * @returns VBox status code.
348 */
349HWACCMR0DECL(int) HWACCMR0Term()
350{
351 int aRc[RTCPUSET_MAX_CPUS];
352
353 memset(aRc, 0, sizeof(aRc));
354 int rc = RTMpOnAll(HWACCMR0DisableCPU, aRc, NULL);
355 AssertRC(rc);
356
357 /* Free the per-cpu pages used for VT-x and AMD-V */
358 for (unsigned i=0;i<RT_ELEMENTS(HWACCMR0Globals.aCpuInfo);i++)
359 {
360 AssertMsg(VBOX_SUCCESS(aRc[i]), ("HWACCMR0DisableCPU failed for cpu %d with rc=%d\n", i, aRc[i]));
361 if (HWACCMR0Globals.aCpuInfo[i].pMemObj)
362 {
363 RTR0MemObjFree(HWACCMR0Globals.aCpuInfo[i].pMemObj, false);
364 HWACCMR0Globals.aCpuInfo[i].pMemObj = NULL;
365 }
366 }
367 return rc;
368}
369
370
371/**
372 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
373 * is to be called on the target cpus.
374 *
375 * @param idCpu The identifier for the CPU the function is called on.
376 * @param pvUser1 The 1st user argument.
377 * @param pvUser2 The 2nd user argument.
378 */
379static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
380{
381 unsigned u32VendorEBX = (uintptr_t)pvUser1;
382 int *paRc = (int *)pvUser2;
383 uint64_t val;
384
385#ifdef LOG_ENABLED
386 SUPR0Printf("HWACCMR0InitCPU cpu %d\n", idCpu);
387#endif
388
389 if (u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX)
390 {
391 val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
392
393 /*
394 * Both the LOCK and VMXON bit must be set; otherwise VMXON will generate a #GP.
395 * Once the lock bit is set, this MSR can no longer be modified.
396 */
397 if (!(val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK)))
398 {
399 /* MSR is not yet locked; we can change it ourselves here */
400 ASMWrMsr(MSR_IA32_FEATURE_CONTROL, HWACCMR0Globals.vmx.msr.feature_ctrl | MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK);
401 val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
402 }
403 if ( (val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
404 == (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
405 paRc[idCpu] = VINF_SUCCESS;
406 else
407 paRc[idCpu] = VERR_VMX_MSR_LOCKED_OR_DISABLED;
408 }
409 else
410 if (u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX)
411 {
412 /* Check if SVM is disabled */
413 val = ASMRdMsr(MSR_K8_VM_CR);
414 if (!(val & MSR_K8_VM_CR_SVM_DISABLE))
415 {
416 /* Turn on SVM in the EFER MSR. */
417 val = ASMRdMsr(MSR_K6_EFER);
418 if (!(val & MSR_K6_EFER_SVME))
419 ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);
420
421 /* Paranoia. */
422 val = ASMRdMsr(MSR_K6_EFER);
423 if (val & MSR_K6_EFER_SVME)
424 paRc[idCpu] = VINF_SUCCESS;
425 else
426 paRc[idCpu] = VERR_SVM_ILLEGAL_EFER_MSR;
427 }
428 else
429 paRc[idCpu] = HWACCMR0Globals.lLastError = VERR_SVM_DISABLED;
430 }
431 else
432 AssertFailed(); /* can't happen */
433 return;
434}
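/*
 * Note (added commentary, not in the original source): the MSR_IA32_FEATURE_CONTROL
 * checks above distinguish three BIOS configurations:
 *
 *     LOCK=0, VMXON=0  -> MSR still writable; the code sets both bits itself.
 *     LOCK=1, VMXON=1  -> BIOS already enabled VT-x and locked the MSR; VINF_SUCCESS.
 *     any other state  -> reported as VERR_VMX_MSR_LOCKED_OR_DISABLED.
 */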
435
436
437/**
438 * Sets up HWACCM on all cpus.
439 *
440 * @returns VBox status code.
441 * @param pVM The VM to operate on.
442 * @param enmNewHwAccmState New hwaccm state
443 *
444 */
445HWACCMR0DECL(int) HWACCMR0EnableAllCpus(PVM pVM, HWACCMSTATE enmNewHwAccmState)
446{
447 Assert(sizeof(HWACCMR0Globals.enmHwAccmState) == sizeof(uint32_t));
448 if (ASMAtomicCmpXchgU32((volatile uint32_t *)&HWACCMR0Globals.enmHwAccmState, enmNewHwAccmState, HWACCMSTATE_UNINITIALIZED))
449 {
450 int aRc[RTCPUSET_MAX_CPUS];
451 RTCPUID idCpu = 0;
452
453 /* Don't setup hwaccm as that might not work (vt-x & 64 bits raw mode) */
454 if (enmNewHwAccmState == HWACCMSTATE_DISABLED)
455 return VINF_SUCCESS;
456
457 memset(aRc, 0, sizeof(aRc));
458
459 /* Allocate one page per cpu for the global vt-x and amd-v pages */
460 for (unsigned i=0;i<RT_ELEMENTS(HWACCMR0Globals.aCpuInfo);i++)
461 {
462 Assert(!HWACCMR0Globals.aCpuInfo[i].pMemObj);
463
464 /** @todo this is rather dangerous if cpus can be taken offline; we don't care for now */
465 if (RTMpIsCpuOnline(i))
466 {
467 int rc = RTR0MemObjAllocCont(&HWACCMR0Globals.aCpuInfo[i].pMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
468 AssertRC(rc);
469 if (RT_FAILURE(rc))
470 return rc;
471
472 void *pvR0 = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[i].pMemObj);
473 Assert(pvR0);
474 memset(pvR0, 0, PAGE_SIZE);
475
476#ifdef LOG_ENABLED
477 SUPR0Printf("address %x phys %x\n", pvR0, (uint32_t)RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[i].pMemObj, 0));
478#endif
479 }
480 }
481 /* First time, so initialize each cpu/core */
482 int rc = RTMpOnAll(HWACCMR0EnableCPU, (void *)pVM, aRc);
483
484 /* Check the return code of all invocations. */
485 if (VBOX_SUCCESS(rc))
486 rc = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
487
488 AssertMsg(VBOX_SUCCESS(rc), ("HWACCMR0EnableAllCpus failed for cpu %d with rc=%d\n", idCpu, rc));
489 return rc;
490 }
491
492 if (HWACCMR0Globals.enmHwAccmState == enmNewHwAccmState)
493 return VINF_SUCCESS;
494
495 /* Request to change the mode is not allowed */
496 return VERR_ACCESS_DENIED;
497}
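/*
 * Illustrative sketch only (hypothetical caller, enum value name assumed): the
 * compare-and-exchange above lets the first request fix the global mode for the
 * lifetime of the module, so a later conflicting request is refused:
 *
 *     rc = HWACCMR0EnableAllCpus(pVM, HWACCMSTATE_ENABLED);    // first caller: VT-x/AMD-V enabled on all CPUs
 *     rc = HWACCMR0EnableAllCpus(pVM, HWACCMSTATE_DISABLED);   // now fails with VERR_ACCESS_DENIED
 */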
498
499/**
500 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
501 * is to be called on the target cpus.
502 *
503 * @param idCpu The identifier for the CPU the function is called on.
504 * @param pvUser1 The 1st user argument.
505 * @param pvUser2 The 2nd user argument.
506 */
507static DECLCALLBACK(void) HWACCMR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
508{
509 PVM pVM = (PVM)pvUser1;
510 int *paRc = (int *)pvUser2;
511 void *pvPageCpu;
512 RTHCPHYS pPageCpuPhys;
513
514 Assert(pVM);
515 Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));
516
517 /* Should never happen */
518 if (!HWACCMR0Globals.aCpuInfo[idCpu].pMemObj)
519 {
520 AssertFailed();
521 return;
522 }
523
524 pvPageCpu = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj);
525 pPageCpuPhys = RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj, 0);
526
527 if (pVM->hwaccm.s.vmx.fSupported)
528 {
529 paRc[idCpu] = VMXR0EnableCpu(idCpu, pVM, pvPageCpu, pPageCpuPhys);
530 AssertRC(paRc[idCpu]);
531 if (VBOX_SUCCESS(paRc[idCpu]))
532 HWACCMR0Globals.aCpuInfo[idCpu].fVMXConfigured = true;
533 }
534 else
535 if (pVM->hwaccm.s.svm.fSupported)
536 {
537 paRc[idCpu] = SVMR0EnableCpu(idCpu, pVM, pvPageCpu, pPageCpuPhys);
538 AssertRC(paRc[idCpu]);
539 if (VBOX_SUCCESS(paRc[idCpu]))
540 HWACCMR0Globals.aCpuInfo[idCpu].fSVMConfigured = true;
541 }
542 return;
543}
544
545/**
546 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
547 * is to be called on the target cpus.
548 *
549 * @param idCpu The identifier for the CPU the function is called on.
550 * @param pvUser1 The 1st user argument.
551 * @param pvUser2 The 2nd user argument.
552 */
553static DECLCALLBACK(void) HWACCMR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
554{
555 void *pvPageCpu;
556 RTHCPHYS pPageCpuPhys;
557 int *paRc = (int *)pvUser1;
558
559 Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));
560
561 if (!HWACCMR0Globals.aCpuInfo[idCpu].pMemObj)
562 return;
563
564 pvPageCpu = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj);
565 pPageCpuPhys = RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj, 0);
566
567 if (HWACCMR0Globals.aCpuInfo[idCpu].fVMXConfigured)
568 {
569 paRc[idCpu] = VMXR0DisableCpu(idCpu, pvPageCpu, pPageCpuPhys);
570 AssertRC(paRc[idCpu]);
571 HWACCMR0Globals.aCpuInfo[idCpu].fVMXConfigured = false;
572 }
573 else
574 if (HWACCMR0Globals.aCpuInfo[idCpu].fSVMConfigured)
575 {
576 paRc[idCpu] = SVMR0DisableCpu(idCpu, pvPageCpu, pPageCpuPhys);
577 AssertRC(paRc[idCpu]);
578 HWACCMR0Globals.aCpuInfo[idCpu].fSVMConfigured = false;
579 }
580 return;
581}
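/*
 * Note (added commentary, not in the original source): the two worker callbacks take
 * their arguments in different slots; HWACCMR0EnableCPU receives pVM in pvUser1 and the
 * rc array in pvUser2 (see HWACCMR0EnableAllCpus), while HWACCMR0DisableCPU receives the
 * rc array in pvUser1 (see HWACCMR0Term).
 */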
582
583
584/**
585 * Does Ring-0 per VM HWACCM initialization.
586 *
587 * This is mainly to check that the Host CPU mode is compatible
588 * with VMX.
589 *
590 * @returns VBox status code.
591 * @param pVM The VM to operate on.
592 */
593HWACCMR0DECL(int) HWACCMR0InitVM(PVM pVM)
594{
595 int rc = VINF_SUCCESS;
596
597 AssertReturn(pVM, VERR_INVALID_PARAMETER);
598
599#ifdef LOG_ENABLED
600 SUPR0Printf("HWACCMR0InitVM: %p\n", pVM);
601#endif
602
603 pVM->hwaccm.s.vmx.fSupported = HWACCMR0Globals.vmx.fSupported;
604 pVM->hwaccm.s.svm.fSupported = HWACCMR0Globals.svm.fSupported;
605
606 pVM->hwaccm.s.vmx.msr.feature_ctrl = HWACCMR0Globals.vmx.msr.feature_ctrl;
607 pVM->hwaccm.s.vmx.hostCR4 = HWACCMR0Globals.vmx.hostCR4;
608 pVM->hwaccm.s.vmx.msr.vmx_basic_info = HWACCMR0Globals.vmx.msr.vmx_basic_info;
609 pVM->hwaccm.s.vmx.msr.vmx_pin_ctls = HWACCMR0Globals.vmx.msr.vmx_pin_ctls;
610 pVM->hwaccm.s.vmx.msr.vmx_proc_ctls = HWACCMR0Globals.vmx.msr.vmx_proc_ctls;
611 pVM->hwaccm.s.vmx.msr.vmx_exit = HWACCMR0Globals.vmx.msr.vmx_exit;
612 pVM->hwaccm.s.vmx.msr.vmx_entry = HWACCMR0Globals.vmx.msr.vmx_entry;
613 pVM->hwaccm.s.vmx.msr.vmx_misc = HWACCMR0Globals.vmx.msr.vmx_misc;
614 pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0 = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0;
615 pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1 = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1;
616 pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0 = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0;
617 pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1 = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1;
618 pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum = HWACCMR0Globals.vmx.msr.vmx_vmcs_enum;
619 pVM->hwaccm.s.svm.u32Rev = HWACCMR0Globals.svm.u32Rev;
620 pVM->hwaccm.s.svm.u32MaxASID = HWACCMR0Globals.svm.u32MaxASID;
621 pVM->hwaccm.s.cpuid.u32AMDFeatureECX = HWACCMR0Globals.cpuid.u32AMDFeatureECX;
622 pVM->hwaccm.s.cpuid.u32AMDFeatureEDX = HWACCMR0Globals.cpuid.u32AMDFeatureEDX;
623 pVM->hwaccm.s.lLastError = HWACCMR0Globals.lLastError;
624
625 /* Init a VT-x or AMD-V VM. */
626 if (pVM->hwaccm.s.vmx.fSupported)
627 rc = VMXR0InitVM(pVM);
628 else
629 if (pVM->hwaccm.s.svm.fSupported)
630 rc = SVMR0InitVM(pVM);
631
632 return rc;
633}
634
635
636/**
637 * Does Ring-0 per VM HWACCM termination.
638 *
639 * @returns VBox status code.
640 * @param pVM The VM to operate on.
641 */
642HWACCMR0DECL(int) HWACCMR0TermVM(PVM pVM)
643{
644 int rc = VINF_SUCCESS;
645
646 AssertReturn(pVM, VERR_INVALID_PARAMETER);
647
648#ifdef LOG_ENABLED
649 SUPR0Printf("HWACCMR0TermVM: %p\n", pVM);
650#endif
651
652 /* Terminate a VT-x or AMD-V VM. */
653 if (pVM->hwaccm.s.vmx.fSupported)
654 rc = VMXR0TermVM(pVM);
655 else
656 if (pVM->hwaccm.s.svm.fSupported)
657 rc = SVMR0TermVM(pVM);
658
659 return rc;
660}
661
662
663/**
664 * Sets up a VT-x or AMD-V session
665 *
666 * @returns VBox status code.
667 * @param pVM The VM to operate on.
668 */
669HWACCMR0DECL(int) HWACCMR0SetupVM(PVM pVM)
670{
671 int rc = VINF_SUCCESS;
672
673 AssertReturn(pVM, VERR_INVALID_PARAMETER);
674
675#ifdef LOG_ENABLED
676 SUPR0Printf("HWACCMR0SetupVM: %p\n", pVM);
677#endif
678
679 /* Setup VT-x or AMD-V. */
680 if (pVM->hwaccm.s.vmx.fSupported)
681 rc = VMXR0SetupVM(pVM);
682 else
683 if (pVM->hwaccm.s.svm.fSupported)
684 rc = SVMR0SetupVM(pVM);
685
686 return rc;
687}
688
689
690/**
691 * Enters the VT-x or AMD-V session
692 *
693 * @returns VBox status code.
694 * @param pVM The VM to operate on.
695 */
696HWACCMR0DECL(int) HWACCMR0Enter(PVM pVM)
697{
698 CPUMCTX *pCtx;
699 int rc;
700
701 rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
702 if (VBOX_FAILURE(rc))
703 return rc;
704
705 /* Always load the guest's FPU/XMM state on-demand. */
706 CPUMDeactivateGuestFPUState(pVM);
707
708 /* Always reload the host context and the guest's CR0 register. (!!!!) */
709 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_HOST_CONTEXT;
710
711 if (pVM->hwaccm.s.vmx.fSupported)
712 {
713 rc = VMXR0Enter(pVM);
714 AssertRC(rc);
715 rc |= VMXR0SaveHostState(pVM);
716 AssertRC(rc);
717 rc |= VMXR0LoadGuestState(pVM, pCtx);
718 AssertRC(rc);
719 if (rc != VINF_SUCCESS)
720 return rc;
721 }
722 else
723 {
724 Assert(pVM->hwaccm.s.svm.fSupported);
725 rc = SVMR0Enter(pVM);
726 AssertRC(rc);
727 rc |= SVMR0LoadGuestState(pVM, pCtx);
728 AssertRC(rc);
729 if (rc != VINF_SUCCESS)
730 return rc;
731
732 }
733 return VINF_SUCCESS;
734}
735
736
737/**
738 * Leaves the VT-x or AMD-V session
739 *
740 * @returns VBox status code.
741 * @param pVM The VM to operate on.
742 */
743HWACCMR0DECL(int) HWACCMR0Leave(PVM pVM)
744{
745 CPUMCTX *pCtx;
746 int rc;
747
748 rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
749 if (VBOX_FAILURE(rc))
750 return rc;
751
752 /** @note It's rather tricky with longjmps done by e.g. Log statements or the page fault handler. */
753 /* We must restore the host FPU here to make absolutely sure we don't leave the guest FPU state active
754 * or trash somebody else's FPU state.
755 */
756
757 /* Restore host FPU and XMM state if necessary. */
758 if (CPUMIsGuestFPUStateActive(pVM))
759 {
760 Log2(("CPUMRestoreHostFPUState\n"));
761 /** @note CPUMRestoreHostFPUState keeps the current CR0 intact. */
762 CPUMRestoreHostFPUState(pVM);
763
764 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
765 }
766
767 if (pVM->hwaccm.s.vmx.fSupported)
768 {
769 return VMXR0Leave(pVM);
770 }
771 else
772 {
773 Assert(pVM->hwaccm.s.svm.fSupported);
774 return SVMR0Leave(pVM);
775 }
776}
777
778/**
779 * Runs guest code in a hardware accelerated VM.
780 *
781 * @returns VBox status code.
782 * @param pVM The VM to operate on.
783 */
784HWACCMR0DECL(int) HWACCMR0RunGuestCode(PVM pVM)
785{
786 CPUMCTX *pCtx;
787 int rc;
788
789 rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
790 if (VBOX_FAILURE(rc))
791 return rc;
792
793 if (pVM->hwaccm.s.vmx.fSupported)
794 {
795 return VMXR0RunGuestCode(pVM, pCtx);
796 }
797 else
798 {
799 Assert(pVM->hwaccm.s.svm.fSupported);
800 return SVMR0RunGuestCode(pVM, pCtx);
801 }
802}
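/*
 * Illustrative sketch only (hypothetical caller, error handling omitted): one hardware
 * accelerated execution round trip built from the entry points above is expected to look
 * roughly like this:
 *
 *     rc = HWACCMR0Enter(pVM);            // sync host state and pending guest state
 *     rc = HWACCMR0RunGuestCode(pVM);     // VT-x or AMD-V world-switch loop
 *     rc = HWACCMR0Leave(pVM);            // restore host FPU/XMM state if the guest used it
 */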
803
804
805#ifdef VBOX_STRICT
806#include <iprt/string.h>
807/**
808 * Dumps a descriptor.
809 *
810 * @param Desc Descriptor to dump.
811 * @param Sel Selector number.
812 * @param pszMsg Message to prepend the log entry with.
813 */
814HWACCMR0DECL(void) HWACCMR0DumpDescriptor(PX86DESCHC Desc, RTSEL Sel, const char *pszMsg)
815{
816 /*
817 * Make variable description string.
818 */
819 static struct
820 {
821 unsigned cch;
822 const char *psz;
823 } const aTypes[32] =
824 {
825 #define STRENTRY(str) { sizeof(str) - 1, str }
826
827 /* system */
828#if HC_ARCH_BITS == 64
829 STRENTRY("Reserved0 "), /* 0x00 */
830 STRENTRY("Reserved1 "), /* 0x01 */
831 STRENTRY("LDT "), /* 0x02 */
832 STRENTRY("Reserved3 "), /* 0x03 */
833 STRENTRY("Reserved4 "), /* 0x04 */
834 STRENTRY("Reserved5 "), /* 0x05 */
835 STRENTRY("Reserved6 "), /* 0x06 */
836 STRENTRY("Reserved7 "), /* 0x07 */
837 STRENTRY("Reserved8 "), /* 0x08 */
838 STRENTRY("TSS64Avail "), /* 0x09 */
839 STRENTRY("ReservedA "), /* 0x0a */
840 STRENTRY("TSS64Busy "), /* 0x0b */
841 STRENTRY("Call64 "), /* 0x0c */
842 STRENTRY("ReservedD "), /* 0x0d */
843 STRENTRY("Int64 "), /* 0x0e */
844 STRENTRY("Trap64 "), /* 0x0f */
845#else
846 STRENTRY("Reserved0 "), /* 0x00 */
847 STRENTRY("TSS16Avail "), /* 0x01 */
848 STRENTRY("LDT "), /* 0x02 */
849 STRENTRY("TSS16Busy "), /* 0x03 */
850 STRENTRY("Call16 "), /* 0x04 */
851 STRENTRY("Task "), /* 0x05 */
852 STRENTRY("Int16 "), /* 0x06 */
853 STRENTRY("Trap16 "), /* 0x07 */
854 STRENTRY("Reserved8 "), /* 0x08 */
855 STRENTRY("TSS32Avail "), /* 0x09 */
856 STRENTRY("ReservedA "), /* 0x0a */
857 STRENTRY("TSS32Busy "), /* 0x0b */
858 STRENTRY("Call32 "), /* 0x0c */
859 STRENTRY("ReservedD "), /* 0x0d */
860 STRENTRY("Int32 "), /* 0x0e */
861 STRENTRY("Trap32 "), /* 0x0f */
862#endif
863 /* non system */
864 STRENTRY("DataRO "), /* 0x10 */
865 STRENTRY("DataRO Accessed "), /* 0x11 */
866 STRENTRY("DataRW "), /* 0x12 */
867 STRENTRY("DataRW Accessed "), /* 0x13 */
868 STRENTRY("DataDownRO "), /* 0x14 */
869 STRENTRY("DataDownRO Accessed "), /* 0x15 */
870 STRENTRY("DataDownRW "), /* 0x16 */
871 STRENTRY("DataDownRW Accessed "), /* 0x17 */
872 STRENTRY("CodeEO "), /* 0x18 */
873 STRENTRY("CodeEO Accessed "), /* 0x19 */
874 STRENTRY("CodeER "), /* 0x1a */
875 STRENTRY("CodeER Accessed "), /* 0x1b */
876 STRENTRY("CodeConfEO "), /* 0x1c */
877 STRENTRY("CodeConfEO Accessed "), /* 0x1d */
878 STRENTRY("CodeConfER "), /* 0x1e */
879 STRENTRY("CodeConfER Accessed ") /* 0x1f */
880 #undef STRENTRY
881 };
882 #define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0)
883 char szMsg[128];
884 char *psz = &szMsg[0];
885 unsigned i = Desc->Gen.u1DescType << 4 | Desc->Gen.u4Type;
886 memcpy(psz, aTypes[i].psz, aTypes[i].cch);
887 psz += aTypes[i].cch;
888
889 if (Desc->Gen.u1Present)
890 ADD_STR(psz, "Present ");
891 else
892 ADD_STR(psz, "Not-Present ");
893#if HC_ARCH_BITS == 64
894 if (Desc->Gen.u1Long)
895 ADD_STR(psz, "64-bit ");
896 else
897 ADD_STR(psz, "Comp ");
898#else
899 if (Desc->Gen.u1Granularity)
900 ADD_STR(psz, "Page ");
901 if (Desc->Gen.u1DefBig)
902 ADD_STR(psz, "32-bit ");
903 else
904 ADD_STR(psz, "16-bit ");
905#endif
906 #undef ADD_STR
907 *psz = '\0';
908
909 /*
910 * Limit and Base and format the output.
911 */
912 uint32_t u32Limit = Desc->Gen.u4LimitHigh << 16 | Desc->Gen.u16LimitLow;
913 if (Desc->Gen.u1Granularity)
914 u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK;
915
916#if HC_ARCH_BITS == 64
917 uint64_t u32Base = ((uintptr_t)Desc->Gen.u32BaseHigh3 << 32ULL) | Desc->Gen.u8BaseHigh2 << 24ULL | Desc->Gen.u8BaseHigh1 << 16ULL | Desc->Gen.u16BaseLow;
918
919 Log(("%s %04x - %VX64 %VX64 - base=%VX64 limit=%08x dpl=%d %s\n", pszMsg,
920 Sel, Desc->au64[0], Desc->au64[1], u32Base, u32Limit, Desc->Gen.u2Dpl, szMsg));
921#else
922 uint32_t u32Base = Desc->Gen.u8BaseHigh2 << 24 | Desc->Gen.u8BaseHigh1 << 16 | Desc->Gen.u16BaseLow;
923
924 Log(("%s %04x - %08x %08x - base=%08x limit=%08x dpl=%d %s\n", pszMsg,
925 Sel, Desc->au32[0], Desc->au32[1], u32Base, u32Limit, Desc->Gen.u2Dpl, szMsg));
926#endif
927}
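/*
 * Illustrative sketch only (hypothetical call, VBOX_STRICT builds): dumping, say, the
 * host TR descriptor from the GDT might look like this, where paGdt points at the host
 * GDT:
 *
 *     RTSEL SelTR = ASMGetTR();
 *     HWACCMR0DumpDescriptor(&paGdt[SelTR >> 3], SelTR, "host TR");
 */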
928
929/**
930 * Formats a full register dump.
931 *
932 * @param pCtx The context to format.
933 */
934HWACCMR0DECL(void) HWACCMDumpRegs(PCPUMCTX pCtx)
935{
936 /*
937 * Format the flags.
938 */
939 static struct
940 {
941 const char *pszSet; const char *pszClear; uint32_t fFlag;
942 } aFlags[] =
943 {
944 { "vip",NULL, X86_EFL_VIP },
945 { "vif",NULL, X86_EFL_VIF },
946 { "ac", NULL, X86_EFL_AC },
947 { "vm", NULL, X86_EFL_VM },
948 { "rf", NULL, X86_EFL_RF },
949 { "nt", NULL, X86_EFL_NT },
950 { "ov", "nv", X86_EFL_OF },
951 { "dn", "up", X86_EFL_DF },
952 { "ei", "di", X86_EFL_IF },
953 { "tf", NULL, X86_EFL_TF },
954 { "nt", "pl", X86_EFL_SF },
955 { "nz", "zr", X86_EFL_ZF },
956 { "ac", "na", X86_EFL_AF },
957 { "po", "pe", X86_EFL_PF },
958 { "cy", "nc", X86_EFL_CF },
959 };
960 char szEFlags[80];
961 char *psz = szEFlags;
962 uint32_t efl = pCtx->eflags.u32;
963 for (unsigned i = 0; i < ELEMENTS(aFlags); i++)
964 {
965 const char *pszAdd = aFlags[i].fFlag & efl ? aFlags[i].pszSet : aFlags[i].pszClear;
966 if (pszAdd)
967 {
968 strcpy(psz, pszAdd);
969 psz += strlen(pszAdd);
970 *psz++ = ' ';
971 }
972 }
973 psz[-1] = '\0';
974
975
976 /*
977 * Format the registers.
978 */
979 Log(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
980 "eip=%08x esp=%08x ebp=%08x iopl=%d %*s\n"
981 "cs={%04x base=%08x limit=%08x flags=%08x} dr0=%08RX64 dr1=%08RX64\n"
982 "ds={%04x base=%08x limit=%08x flags=%08x} dr2=%08RX64 dr3=%08RX64\n"
983 "es={%04x base=%08x limit=%08x flags=%08x} dr4=%08RX64 dr5=%08RX64\n"
984 "fs={%04x base=%08x limit=%08x flags=%08x} dr6=%08RX64 dr7=%08RX64\n"
985 ,
986 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
987 pCtx->eip, pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), 31, szEFlags,
988 (RTSEL)pCtx->cs, pCtx->csHid.u32Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u, pCtx->dr0, pCtx->dr1,
989 (RTSEL)pCtx->ds, pCtx->dsHid.u32Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u, pCtx->dr2, pCtx->dr3,
990 (RTSEL)pCtx->es, pCtx->esHid.u32Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u, pCtx->dr4, pCtx->dr5,
991 (RTSEL)pCtx->fs, pCtx->fsHid.u32Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u, pCtx->dr6, pCtx->dr7));
992
993 Log(("gs={%04x base=%08x limit=%08x flags=%08x} cr0=%08RX64 cr2=%08RX64\n"
994 "ss={%04x base=%08x limit=%08x flags=%08x} cr3=%08RX64 cr4=%08RX64\n"
995 "gdtr=%08x:%04x idtr=%08x:%04x eflags=%08x\n"
996 "ldtr={%04x base=%08x limit=%08x flags=%08x}\n"
997 "tr ={%04x base=%08x limit=%08x flags=%08x}\n"
998 "SysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
999 "FCW=%04x FSW=%04x FTW=%04x\n",
1000 (RTSEL)pCtx->gs, pCtx->gsHid.u32Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u, pCtx->cr0, pCtx->cr2,
1001 (RTSEL)pCtx->ss, pCtx->ssHid.u32Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u, pCtx->cr3, pCtx->cr4,
1002 pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, efl,
1003 (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u32Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
1004 (RTSEL)pCtx->tr, pCtx->trHid.u32Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
1005 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
1006 pCtx->fpu.FCW, pCtx->fpu.FSW, pCtx->fpu.FTW));
1007
1008
1009}
1010#endif