VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp@1283

Last change on this file since 1283 was 1283, checked in by vboxsync, 18 years ago

Added support for the hybrid darwin setup where the kernel is 32-bit but the cpu *might* be running in 64-bit mode.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 16.5 KB
/* $Id: HWACCMR0.cpp 1283 2007-03-07 00:02:11Z vboxsync $ */
/** @file
 * HWACCM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006 InnoTek Systemberatung GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * If you received this file as part of a commercial VirtualBox
 * distribution, then only the terms of your commercial VirtualBox
 * license agreement apply instead of the previous paragraph.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HWACCM
#include <VBox/hwaccm.h>
#include "HWACCMInternal.h"
#include <VBox/vm.h>
#include <VBox/x86.h>
#include <VBox/hwacc_vmx.h>
#include <VBox/hwacc_svm.h>
#include <VBox/pgm.h>
#include <VBox/pdm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/selm.h>
#include <VBox/iom.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include "HWVMXR0.h"
#include "HWSVMR0.h"

/**
 * Does Ring-0 HWACCM initialization.
 *
 * This is mainly to check that the Host CPU mode is compatible
 * with VMX and SVM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0Init(PVM pVM)
{
    LogComFlow(("HWACCMR0Init: %p\n", pVM));

    pVM->hwaccm.s.vmx.fSupported = false;
    pVM->hwaccm.s.svm.fSupported = false;

#ifndef VBOX_WITH_HYBIRD_32BIT_KERNEL /* paranoia */

    /*
     * Check for VMX capabilities
     */
    if (ASMHasCpuId())
    {
        uint32_t u32FeaturesECX;
        uint32_t u32Dummy;
        uint32_t u32FeaturesEDX;
        uint32_t u32Vendor1, u32Vendor2, u32Vendor3;

        ASMCpuId(0, &u32Dummy, &u32Vendor1, &u32Vendor3, &u32Vendor2);
        ASMCpuId(1, &u32Dummy, &u32Dummy, &u32FeaturesECX, &u32FeaturesEDX);
        /* Query AMD features. */
        ASMCpuId(0x80000001, &u32Dummy, &u32Dummy, &pVM->hwaccm.s.cpuid.u32AMDFeatureECX, &pVM->hwaccm.s.cpuid.u32AMDFeatureEDX);
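        /* CPUID leaf 0 returns the vendor string in EBX:EDX:ECX order ("Genu",
         * "ineI", "ntel" on Intel parts), which is why the EDX output lands in
         * u32Vendor2 and the ECX output in u32Vendor3 above. */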

        if (    u32Vendor1 == 0x756e6547 /* Genu */
            &&  u32Vendor2 == 0x49656e69 /* ineI */
            &&  u32Vendor3 == 0x6c65746e /* ntel */
           )
        {
            /*
             * Read all VMX MSRs if VMX is available. (same goes for RDMSR/WRMSR)
             * We also assume all VMX-enabled CPUs support fxsave/fxrstor.
             */
            if (    (u32FeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
                &&  (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
                &&  (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
               )
            {
                pVM->hwaccm.s.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
                /*
                 * Both the LOCK and VMXON bit must be set; otherwise VMXON will generate a #GP.
                 * Once the lock bit is set, this MSR can no longer be modified.
                 */
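                /* In the MSR_IA32_FEATURE_CONTROL MSR (0x3a) the lock flag is
                 * bit 0 and the VMXON enable flag bit 2; both are normally set
                 * by the BIOS. If the BIOS locked the MSR with VMXON disabled,
                 * VT-x stays unavailable until the next reset. */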
                if (    (pVM->hwaccm.s.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
                    ==  (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
                {
                    pVM->hwaccm.s.vmx.fSupported            = true;
                    pVM->hwaccm.s.vmx.msr.vmx_basic_info    = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
                    pVM->hwaccm.s.vmx.msr.vmx_pin_ctls      = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
                    pVM->hwaccm.s.vmx.msr.vmx_proc_ctls     = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
                    pVM->hwaccm.s.vmx.msr.vmx_exit          = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
                    pVM->hwaccm.s.vmx.msr.vmx_entry         = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
                    pVM->hwaccm.s.vmx.msr.vmx_misc          = ASMRdMsr(MSR_IA32_VMX_MISC);
                    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0    = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
                    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1    = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
                    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0    = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
                    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1    = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
                    pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum     = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
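                    /* These are the VMX capability MSRs, 0x480 (IA32_VMX_BASIC)
                     * through 0x48a (IA32_VMX_VMCS_ENUM): the *_CTLS values
                     * encode the allowed-0/allowed-1 settings for the VMCS
                     * control fields, and the CR0/CR4 FIXED values give the
                     * bits the host must keep set/clear in VMX operation. */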

                    /*
                     * Check CR4.VMXE
                     */
                    pVM->hwaccm.s.vmx.hostCR4 = ASMGetCR4();
                    if (!(pVM->hwaccm.s.vmx.hostCR4 & X86_CR4_VMXE))
                    {
                        /* In theory this bit could be cleared behind our back, which would cause
                         * #UD faults when we try to execute the VMX instructions...
                         */
                        ASMSetCR4(pVM->hwaccm.s.vmx.hostCR4 | X86_CR4_VMXE);
                    }
                }
            }
        }
        else
        if (    u32Vendor1 == 0x68747541 /* Auth */
            &&  u32Vendor2 == 0x69746e65 /* enti */
            &&  u32Vendor3 == 0x444d4163 /* cAMD */
           )
        {
            /*
             * Read all SVM MSRs if SVM is available. (same goes for RDMSR/WRMSR)
             * We also assume all SVM-enabled CPUs support fxsave/fxrstor.
             */
            if (    (pVM->hwaccm.s.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
                &&  (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
                &&  (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
               )
            {
                uint64_t val;

                /* Turn on SVM in the EFER MSR. */
                val = ASMRdMsr(MSR_K6_EFER);
                if (!(val & MSR_K6_EFER_SVME))
                {
                    ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);
                }
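                /* EFER.SVME is bit 12; while it is clear the SVM instructions
                 * (VMRUN, VMLOAD, VMSAVE, ...) raise #UD. The write can fail
                 * to stick if the BIOS disabled SVM via the VM_CR MSR, hence
                 * the re-read below. */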
                /* Paranoia. */
                val = ASMRdMsr(MSR_K6_EFER);
                if (val & MSR_K6_EFER_SVME)
                {
                    /* Query AMD features. */
                    ASMCpuId(0x8000000A, &pVM->hwaccm.s.svm.u32Rev, &pVM->hwaccm.s.svm.u32MaxASID, &u32Dummy, &u32Dummy);

                    pVM->hwaccm.s.svm.fSupported = true;
                }
                else
                    AssertFailed();
            }
        }
    }
#endif /* !VBOX_WITH_HYBIRD_32BIT_KERNEL */

    return VINF_SUCCESS;
}
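
/* Rough sketch of the call order a ring-0 caller is apparently expected to
 * follow, judging from the functions in this file (hypothetical usage, not
 * part of the original source):
 *
 *     HWACCMR0Init(pVM);             // probe VT-x/SVM capabilities once
 *     HWACCMR0SetupVMX(pVM);         // set up VMX or SVM structures
 *     do
 *     {
 *         HWACCMR0Enable(pVM);       // load host + guest state on this CPU
 *         rc = HWACCMR0RunGuestCode(pVM);
 *         HWACCMR0Disable(pVM);      // restore host state
 *     } while (rc == VINF_SUCCESS);
 */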


/**
 * Sets up and activates VMX or SVM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0SetupVMX(PVM pVM)
{
    int rc = VINF_SUCCESS;

    if (pVM == NULL)
        return VERR_INVALID_PARAMETER;

    /* Set up Intel VMX if it's supported, otherwise fall back to AMD SVM. */
    if (pVM->hwaccm.s.vmx.fSupported)
        rc = VMXR0Setup(pVM);
    else
        rc = SVMR0Setup(pVM);

    return rc;
}


/**
 * Enable VMX or SVM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0Enable(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    /* Always load the guest's FPU/XMM state on-demand. */
    CPUMDeactivateGuestFPUState(pVM);

    /* Always reload the host context and the guest's CR0 register. (!!!!) */
    pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_HOST_CONTEXT;

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        rc  = VMXR0Enable(pVM);
        AssertRC(rc);
        rc |= VMXR0SaveHostState(pVM);
        AssertRC(rc);
        rc |= VMXR0LoadGuestState(pVM, pCtx);
        AssertRC(rc);
        if (rc != VINF_SUCCESS)
            return rc;
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        rc  = SVMR0Enable(pVM);
        AssertRC(rc);
        rc |= SVMR0LoadGuestState(pVM, pCtx);
        AssertRC(rc);
        if (rc != VINF_SUCCESS)
            return rc;
    }
    return VINF_SUCCESS;
}


/**
 * Disable VMX or SVM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0Disable(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    /** @note It's rather tricky with longjmps done by e.g. Log statements or the page fault handler. */
    /* We must restore the host FPU here to make absolutely sure we don't leave the guest FPU state active
     * or trash somebody else's FPU state.
     */

    /* Restore host FPU and XMM state if necessary. */
    if (CPUMIsGuestFPUStateActive(pVM))
    {
        Log2(("CPUMRestoreHostFPUState\n"));
        /** @note CPUMRestoreHostFPUState keeps the current CR0 intact. */
        CPUMRestoreHostFPUState(pVM);

        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
    }

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        return VMXR0Disable(pVM);
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        return SVMR0Disable(pVM);
    }
}

/**
 * Runs guest code in a hardware accelerated VM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0RunGuestCode(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        return VMXR0RunGuestCode(pVM, pCtx);
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        return SVMR0RunGuestCode(pVM, pCtx);
    }
}


#ifdef VBOX_STRICT
#include <iprt/string.h>
/**
 * Dumps a descriptor.
 *
 * @param   Desc    Descriptor to dump.
 * @param   Sel     Selector number.
 * @param   pszMsg  Message to prepend the log entry with.
 */
HWACCMR0DECL(void) HWACCMR0DumpDescriptor(PVBOXDESC Desc, RTSEL Sel, const char *pszMsg)
{
    /*
     * Make variable description string.
     */
    static struct
    {
        unsigned    cch;
        const char *psz;
    } const aTypes[32] =
    {
#define STRENTRY(str) { sizeof(str) - 1, str }
        /* system */
        STRENTRY("Reserved0 "),             /* 0x00 */
        STRENTRY("TSS16Avail "),            /* 0x01 */
        STRENTRY("LDT "),                   /* 0x02 */
        STRENTRY("TSS16Busy "),             /* 0x03 */
        STRENTRY("Call16 "),                /* 0x04 */
        STRENTRY("Task "),                  /* 0x05 */
        STRENTRY("Int16 "),                 /* 0x06 */
        STRENTRY("Trap16 "),                /* 0x07 */
        STRENTRY("Reserved8 "),             /* 0x08 */
        STRENTRY("TSS32Avail "),            /* 0x09 */
        STRENTRY("ReservedA "),             /* 0x0a */
        STRENTRY("TSS32Busy "),             /* 0x0b */
        STRENTRY("Call32 "),                /* 0x0c */
        STRENTRY("ReservedD "),             /* 0x0d */
        STRENTRY("Int32 "),                 /* 0x0e */
        STRENTRY("Trap32 "),                /* 0x0f */
        /* non system */
        STRENTRY("DataRO "),                /* 0x10 */
        STRENTRY("DataRO Accessed "),       /* 0x11 */
        STRENTRY("DataRW "),                /* 0x12 */
        STRENTRY("DataRW Accessed "),       /* 0x13 */
        STRENTRY("DataDownRO "),            /* 0x14 */
        STRENTRY("DataDownRO Accessed "),   /* 0x15 */
        STRENTRY("DataDownRW "),            /* 0x16 */
        STRENTRY("DataDownRW Accessed "),   /* 0x17 */
        STRENTRY("CodeEO "),                /* 0x18 */
        STRENTRY("CodeEO Accessed "),       /* 0x19 */
        STRENTRY("CodeER "),                /* 0x1a */
        STRENTRY("CodeER Accessed "),       /* 0x1b */
        STRENTRY("CodeConfEO "),            /* 0x1c */
        STRENTRY("CodeConfEO Accessed "),   /* 0x1d */
        STRENTRY("CodeConfER "),            /* 0x1e */
        STRENTRY("CodeConfER Accessed ")    /* 0x1f */
#undef STRENTRY
    };
    #define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0)
    char        szMsg[128];
    char       *psz = &szMsg[0];
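    /* The 5-bit table index is the descriptor's S flag (u1DescType) in bit 4
     * with the 4-bit type field below it, matching the 32-entry aTypes table:
     * 0x00..0x0f are system descriptors, 0x10..0x1f code/data descriptors. */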
    unsigned    i = Desc->Gen.u1DescType << 4 | Desc->Gen.u4Type;
    memcpy(psz, aTypes[i].psz, aTypes[i].cch);
    psz += aTypes[i].cch;

    if (Desc->Gen.u1Present)
        ADD_STR(psz, "Present ");
    else
        ADD_STR(psz, "Not-Present ");
    if (Desc->Gen.u1Granularity)
        ADD_STR(psz, "Page ");
    if (Desc->Gen.u1DefBig)
        ADD_STR(psz, "32-bit ");
    else
        ADD_STR(psz, "16-bit ");
    #undef ADD_STR
    *psz = '\0';

    /*
     * Limit and Base and format the output.
     */
    uint32_t    u32Limit = Desc->Gen.u4LimitHigh << 16 | Desc->Gen.u16LimitLow;
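    /* When the granularity bit is set the 20-bit limit is in 4 KiB units,
     * so the byte limit is (u32Limit << 12) | 0xfff, which is what
     * PAGE_SHIFT and PAGE_OFFSET_MASK expand to for 4 KiB pages. */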
    if (Desc->Gen.u1Granularity)
        u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK;
    uint32_t    u32Base = Desc->Gen.u8BaseHigh2 << 24 | Desc->Gen.u8BaseHigh1 << 16 | Desc->Gen.u16BaseLow;

    Log(("%s %04x - %08x %08x - base=%08x limit=%08x dpl=%d %s\n", pszMsg,
         Sel, Desc->au32[0], Desc->au32[1], u32Base, u32Limit, Desc->Gen.u2Dpl, szMsg));
}

/**
 * Formats a full register dump.
 *
 * @param   pCtx    The context to format.
 */
HWACCMR0DECL(void) HWACCMDumpRegs(PCPUMCTX pCtx)
{
    /*
     * Format the flags.
     */
    static struct
    {
        const char *pszSet; const char *pszClear; uint32_t fFlag;
    } aFlags[] =
    {
        { "vip", NULL, X86_EFL_VIP },
        { "vif", NULL, X86_EFL_VIF },
        { "ac",  NULL, X86_EFL_AC },
        { "vm",  NULL, X86_EFL_VM },
        { "rf",  NULL, X86_EFL_RF },
        { "nt",  NULL, X86_EFL_NT },
        { "ov",  "nv", X86_EFL_OF },
        { "dn",  "up", X86_EFL_DF },
        { "ei",  "di", X86_EFL_IF },
        { "tf",  NULL, X86_EFL_TF },
        { "ng",  "pl", X86_EFL_SF },
        { "nz",  "zr", X86_EFL_ZF },
        { "ac",  "na", X86_EFL_AF },
        { "po",  "pe", X86_EFL_PF },
        { "cy",  "nc", X86_EFL_CF },
    };
    char     szEFlags[80];
    char    *psz = szEFlags;
    uint32_t efl = pCtx->eflags.u32;
    for (unsigned i = 0; i < ELEMENTS(aFlags); i++)
    {
        const char *pszAdd = aFlags[i].fFlag & efl ? aFlags[i].pszSet : aFlags[i].pszClear;
        if (pszAdd)
        {
            strcpy(psz, pszAdd);
            psz += strlen(pszAdd);
            *psz++ = ' ';
        }
    }
    psz[-1] = '\0';
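    /* The loop always appends at least the two-way flags (ov/nv, cy/nc, ...),
     * so psz[-1] safely replaces the final trailing blank with a terminator. */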


    /*
     * Format the registers.
     */
    Log(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
         "eip=%08x esp=%08x ebp=%08x iopl=%d %*s\n"
         "cs={%04x base=%08x limit=%08x flags=%08x} dr0=%08x dr1=%08x\n"
         "ds={%04x base=%08x limit=%08x flags=%08x} dr2=%08x dr3=%08x\n"
         "es={%04x base=%08x limit=%08x flags=%08x} dr4=%08x dr5=%08x\n"
         "fs={%04x base=%08x limit=%08x flags=%08x} dr6=%08x dr7=%08x\n"
         ,
         pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
         pCtx->eip, pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), 31, szEFlags,
         (RTSEL)pCtx->cs, pCtx->csHid.u32Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u, pCtx->dr0, pCtx->dr1,
         (RTSEL)pCtx->ds, pCtx->dsHid.u32Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u, pCtx->dr2, pCtx->dr3,
         (RTSEL)pCtx->es, pCtx->esHid.u32Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u, pCtx->dr4, pCtx->dr5,
         (RTSEL)pCtx->fs, pCtx->fsHid.u32Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u, pCtx->dr6, pCtx->dr7));

    Log(("gs={%04x base=%08x limit=%08x flags=%08x} cr0=%08x cr2=%08x\n"
         "ss={%04x base=%08x limit=%08x flags=%08x} cr3=%08x cr4=%08x\n"
         "gdtr=%08x:%04x idtr=%08x:%04x eflags=%08x\n"
         "ldtr={%04x base=%08x limit=%08x flags=%08x}\n"
         "tr ={%04x base=%08x limit=%08x flags=%08x}\n"
         "SysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
         "FCW=%04x FSW=%04x FTW=%04x\n",
         (RTSEL)pCtx->gs, pCtx->gsHid.u32Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u, pCtx->cr0, pCtx->cr2,
         (RTSEL)pCtx->ss, pCtx->ssHid.u32Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u, pCtx->cr3, pCtx->cr4,
         pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, efl,
         (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u32Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
         (RTSEL)pCtx->tr, pCtx->trHid.u32Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
         pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
         pCtx->fpu.FCW, pCtx->fpu.FSW, pCtx->fpu.FTW));
}
#endif /* VBOX_STRICT */