VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp@ 15540

Last change on this file since 15540 was 15439, checked in by vboxsync, 16 years ago

Enable 64-bit guest support on 32-bit hosts. Only use rem64 if the guest OS type is 64-bit.

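For orientation, here is a rough sketch of how the ring-0 entry points defined in this file are typically sequenced. This is a hypothetical, simplified pseudo-caller (the real dispatching lives elsewhere in VMMR0), with error handling omitted:

/* Hypothetical, simplified call order (not part of HWACCMR0.cpp): */
HWACCMR0Init();                                    /* module load: detect VT-x / AMD-V support    */
HWACCMR0EnableAllCpus(pVM, HWACCMSTATE_ENABLED);   /* enable hardware virtualization on each CPU  */
HWACCMR0InitVM(pVM);                               /* copy detected capabilities into the VM      */
HWACCMR0SetupVM(pVM);                              /* set up the VT-x / AMD-V session for the VM  */
for (;;)                                           /* per world switch:                           */
{
    HWACCMR0Enter(pVM, pVCpu);                     /* load host/guest state                       */
    HWACCMR0RunGuestCode(pVM, pVCpu);              /* execute guest code                          */
    HWACCMR0Leave(pVM, pVCpu);                     /* save guest state, restore host state        */
}
HWACCMR0TermVM(pVM);
HWACCMR0Term();                                    /* module unload                               */
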
1/* $Id: HWACCMR0.cpp 15439 2008-12-13 12:48:22Z vboxsync $ */
2/** @file
3 * HWACCM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_HWACCM
27#include <VBox/hwaccm.h>
28#include "HWACCMInternal.h"
29#include <VBox/vm.h>
30#include <VBox/x86.h>
31#include <VBox/hwacc_vmx.h>
32#include <VBox/hwacc_svm.h>
33#include <VBox/pgm.h>
34#include <VBox/pdm.h>
35#include <VBox/err.h>
36#include <VBox/log.h>
37#include <VBox/selm.h>
38#include <VBox/iom.h>
39#include <iprt/param.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/string.h>
43#include <iprt/memobj.h>
44#include <iprt/cpuset.h>
45#include <iprt/power.h>
46#include "HWVMXR0.h"
47#include "HWSVMR0.h"
48
49/*******************************************************************************
50* Internal Functions *
51*******************************************************************************/
52static DECLCALLBACK(void) hwaccmR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
53static DECLCALLBACK(void) hwaccmR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
54static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
55static int hwaccmR0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu);
56static DECLCALLBACK(void) hwaccmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvUser);
57
58/*******************************************************************************
59* Global Variables *
60*******************************************************************************/
61
62static struct
63{
64 HWACCM_CPUINFO aCpuInfo[RTCPUSET_MAX_CPUS];
65
66 /** Ring 0 handlers for VT-x and AMD-V. */
67 DECLR0CALLBACKMEMBER(int, pfnEnterSession,(PVM pVM, PVMCPU pVCpu, PHWACCM_CPUINFO pCpu));
68 DECLR0CALLBACKMEMBER(int, pfnLeaveSession,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
69 DECLR0CALLBACKMEMBER(int, pfnSaveHostState,(PVM pVM, PVMCPU pVCpu));
70 DECLR0CALLBACKMEMBER(int, pfnLoadGuestState,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
71 DECLR0CALLBACKMEMBER(int, pfnRunGuestCode,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
72 DECLR0CALLBACKMEMBER(int, pfnEnableCpu, (PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys));
73 DECLR0CALLBACKMEMBER(int, pfnDisableCpu, (PHWACCM_CPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys));
74 DECLR0CALLBACKMEMBER(int, pfnInitVM, (PVM pVM));
75 DECLR0CALLBACKMEMBER(int, pfnTermVM, (PVM pVM));
76 DECLR0CALLBACKMEMBER(int, pfnSetupVM, (PVM pVM));
77
78 /** Maximum ASID allowed. */
79 uint32_t uMaxASID;
80
81 struct
82 {
83 /** Set by the ring-0 driver to indicate VMX is supported by the CPU. */
84 bool fSupported;
85 /** Whether we're using SUPR0EnableVTx or not. */
86 bool fUsingSUPR0EnableVTx;
87
88 /** Host CR4 value (set by ring-0 VMX init) */
89 uint64_t hostCR4;
90
91 /** VMX MSR values */
92 struct
93 {
94 uint64_t feature_ctrl;
95 uint64_t vmx_basic_info;
96 VMX_CAPABILITY vmx_pin_ctls;
97 VMX_CAPABILITY vmx_proc_ctls;
98 VMX_CAPABILITY vmx_proc_ctls2;
99 VMX_CAPABILITY vmx_exit;
100 VMX_CAPABILITY vmx_entry;
101 uint64_t vmx_misc;
102 uint64_t vmx_cr0_fixed0;
103 uint64_t vmx_cr0_fixed1;
104 uint64_t vmx_cr4_fixed0;
105 uint64_t vmx_cr4_fixed1;
106 uint64_t vmx_vmcs_enum;
107 uint64_t vmx_eptcaps;
108 } msr;
109 /* Last instruction error */
110 uint32_t ulLastInstrError;
111 } vmx;
112 struct
113 {
114 /** Set by the ring-0 driver to indicate SVM is supported by the CPU. */
115 bool fSupported;
116
117 /** SVM revision. */
118 uint32_t u32Rev;
119
120 /** SVM feature bits from cpuid 0x8000000a */
121 uint32_t u32Features;
122 } svm;
123 /** Saved error from detection */
124 int32_t lLastError;
125
126 struct
127 {
128 uint32_t u32AMDFeatureECX;
129 uint32_t u32AMDFeatureEDX;
130 } cpuid;
131
132 HWACCMSTATE enmHwAccmState;
133
134 volatile bool fSuspended;
135} HWACCMR0Globals;
136
137
138
139/**
140 * Does global Ring-0 HWACCM initialization.
141 *
142 * @returns VBox status code.
143 */
144VMMR0DECL(int) HWACCMR0Init(void)
145{
146 int rc;
147
148 memset(&HWACCMR0Globals, 0, sizeof(HWACCMR0Globals));
149 HWACCMR0Globals.enmHwAccmState = HWACCMSTATE_UNINITIALIZED;
150 for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
151 HWACCMR0Globals.aCpuInfo[i].pMemObj = NIL_RTR0MEMOBJ;
152
153 /* Fill in all callbacks with placeholders. */
154 HWACCMR0Globals.pfnEnterSession = HWACCMR0DummyEnter;
155 HWACCMR0Globals.pfnLeaveSession = HWACCMR0DummyLeave;
156 HWACCMR0Globals.pfnSaveHostState = HWACCMR0DummySaveHostState;
157 HWACCMR0Globals.pfnLoadGuestState = HWACCMR0DummyLoadGuestState;
158 HWACCMR0Globals.pfnRunGuestCode = HWACCMR0DummyRunGuestCode;
159 HWACCMR0Globals.pfnEnableCpu = HWACCMR0DummyEnableCpu;
160 HWACCMR0Globals.pfnDisableCpu = HWACCMR0DummyDisableCpu;
161 HWACCMR0Globals.pfnInitVM = HWACCMR0DummyInitVM;
162 HWACCMR0Globals.pfnTermVM = HWACCMR0DummyTermVM;
163 HWACCMR0Globals.pfnSetupVM = HWACCMR0DummySetupVM;
164
165 /*
166 * Check for VT-x and AMD-V capabilities
167 */
168 if (ASMHasCpuId())
169 {
170 uint32_t u32FeaturesECX;
171 uint32_t u32Dummy;
172 uint32_t u32FeaturesEDX;
173 uint32_t u32VendorEBX, u32VendorECX, u32VendorEDX;
174
175 ASMCpuId(0, &u32Dummy, &u32VendorEBX, &u32VendorECX, &u32VendorEDX);
176 ASMCpuId(1, &u32Dummy, &u32Dummy, &u32FeaturesECX, &u32FeaturesEDX);
177 /* Query AMD features. */
178 ASMCpuId(0x80000001, &u32Dummy, &u32Dummy, &HWACCMR0Globals.cpuid.u32AMDFeatureECX, &HWACCMR0Globals.cpuid.u32AMDFeatureEDX);
179
180 if ( u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX
181 && u32VendorECX == X86_CPUID_VENDOR_INTEL_ECX
182 && u32VendorEDX == X86_CPUID_VENDOR_INTEL_EDX
183 )
184 {
185 /*
186 * Read all VMX MSRs if VMX is available. (same goes for RDMSR/WRMSR)
187 * We also assume all VMX-enabled CPUs support fxsave/fxrstor.
188 */
189 if ( (u32FeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
190 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
191 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
192 )
193 {
194 int aRc[RTCPUSET_MAX_CPUS];
195 RTCPUID idCpu = 0;
196
197 HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
198
199 /*
200 * First try to use the native kernel API for controlling VT-x.
201 * (At the moment this is only supported by some Mac OS X kernels.)
202 */
203 HWACCMR0Globals.lLastError = rc = SUPR0EnableVTx(true /* fEnable */);
204 if (rc != VERR_NOT_SUPPORTED)
205 {
206 AssertMsg(rc == VINF_SUCCESS || rc == VERR_VMX_IN_VMX_ROOT_MODE || rc == VERR_VMX_NO_VMX, ("%Rrc\n", rc));
207 HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx = true;
208 if (RT_SUCCESS(rc))
209 {
210 HWACCMR0Globals.vmx.fSupported = true;
211 rc = SUPR0EnableVTx(false /* fEnable */);
212 AssertRC(rc);
213 }
214 }
215 else
216 {
217 HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx = false;
218
219 /* We need to check if VT-x has been properly initialized on all CPUs. Some BIOSes do a lousy job. */
220 memset(aRc, 0, sizeof(aRc));
221 HWACCMR0Globals.lLastError = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);
222
223 /* Check the return code of all invocations. */
224 if (RT_SUCCESS(HWACCMR0Globals.lLastError))
225 HWACCMR0Globals.lLastError = hwaccmR0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
226 }
227 if (RT_SUCCESS(HWACCMR0Globals.lLastError))
228 {
229 /* Reread in case we've changed it. */
230 HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
231
232 if ( (HWACCMR0Globals.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
233 == (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
234 {
235 RTR0MEMOBJ pScatchMemObj;
236 void *pvScatchPage;
237 RTHCPHYS pScatchPagePhys;
238
239 HWACCMR0Globals.vmx.msr.vmx_basic_info = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
240 HWACCMR0Globals.vmx.msr.vmx_pin_ctls.u = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
241 HWACCMR0Globals.vmx.msr.vmx_proc_ctls.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
242 HWACCMR0Globals.vmx.msr.vmx_exit.u = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
243 HWACCMR0Globals.vmx.msr.vmx_entry.u = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
244 HWACCMR0Globals.vmx.msr.vmx_misc = ASMRdMsr(MSR_IA32_VMX_MISC);
245 HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
246 HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
247 HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
248 HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
249 HWACCMR0Globals.vmx.msr.vmx_vmcs_enum = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
250 /* VPID: 16-bit ASID. */
251 HWACCMR0Globals.uMaxASID = 0x10000; /* exclusive */
252
253 if (HWACCMR0Globals.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
254 {
255 HWACCMR0Globals.vmx.msr.vmx_proc_ctls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
256 if (HWACCMR0Globals.vmx.msr.vmx_proc_ctls2.n.allowed1 & (VMX_VMCS_CTRL_PROC_EXEC2_EPT|VMX_VMCS_CTRL_PROC_EXEC2_VPID))
257 HWACCMR0Globals.vmx.msr.vmx_eptcaps = ASMRdMsr(MSR_IA32_VMX_EPT_CAPS);
258 }
259
260 if (!HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
261 {
262 HWACCMR0Globals.vmx.hostCR4 = ASMGetCR4();
263
264 rc = RTR0MemObjAllocCont(&pScatchMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
265 if (RT_FAILURE(rc))
266 return rc;
267
268 pvScatchPage = RTR0MemObjAddress(pScatchMemObj);
269 pScatchPagePhys = RTR0MemObjGetPagePhysAddr(pScatchMemObj, 0);
270 memset(pvScatchPage, 0, PAGE_SIZE);
271
272 /* Set revision dword at the beginning of the structure. */
273 *(uint32_t *)pvScatchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(HWACCMR0Globals.vmx.msr.vmx_basic_info);
274
275 /* Make sure we don't get rescheduled to another cpu during this probe. */
276 RTCCUINTREG fFlags = ASMIntDisableFlags();
277
278 /*
279 * Check CR4.VMXE
280 */
281 if (!(HWACCMR0Globals.vmx.hostCR4 & X86_CR4_VMXE))
282 {
283 /* In theory this bit could be cleared behind our back, which would cause #UD faults when we
284 * try to execute the VMX instructions...
285 */
286 ASMSetCR4(HWACCMR0Globals.vmx.hostCR4 | X86_CR4_VMXE);
287 }
288
289 /* Enter VMX Root Mode */
290 rc = VMXEnable(pScatchPagePhys);
291 if (RT_FAILURE(rc))
292 {
293 /* KVM leaves the CPU in VMX root mode. Not only is this not allowed, it will crash the host when we enter raw mode, because
294 * (a) clearing X86_CR4_VMXE in CR4 causes a #GP (we no longer modify this bit)
295 * (b) turning off paging causes a #GP (unavoidable when switching from long mode to 32-bit mode, or from 32-bit mode to PAE)
296 *
297 * They should fix their code, but until they do we simply refuse to run.
298 */
299 HWACCMR0Globals.lLastError = VERR_VMX_IN_VMX_ROOT_MODE;
300 }
301 else
302 {
303 HWACCMR0Globals.vmx.fSupported = true;
304 VMXDisable();
305 }
306
307 /* Restore CR4 again; don't leave the X86_CR4_VMXE flag set if it wasn't so before (some software could incorrectly think it's in VMX mode) */
308 ASMSetCR4(HWACCMR0Globals.vmx.hostCR4);
309 ASMSetFlags(fFlags);
310
311 RTR0MemObjFree(pScatchMemObj, false);
312 if (RT_FAILURE(HWACCMR0Globals.lLastError))
313 return HWACCMR0Globals.lLastError;
314 }
315 }
316 else
317 {
318 AssertFailed(); /* can't hit this case anymore */
319 HWACCMR0Globals.lLastError = VERR_VMX_ILLEGAL_FEATURE_CONTROL_MSR;
320 }
321 }
322#ifdef LOG_ENABLED
323 else
324 SUPR0Printf("HWACCMR0InitCPU failed with rc=%d\n", HWACCMR0Globals.lLastError);
325#endif
326 }
327 else
328 HWACCMR0Globals.lLastError = VERR_VMX_NO_VMX;
329 }
330 else
331 if ( u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX
332 && u32VendorECX == X86_CPUID_VENDOR_AMD_ECX
333 && u32VendorEDX == X86_CPUID_VENDOR_AMD_EDX
334 )
335 {
336 /*
337 * Read all SVM MSRs if SVM is available. (same goes for RDMSR/WRMSR)
338 * We also assume all SVM-enabled CPUs support fxsave/fxrstor.
339 */
340 if ( (HWACCMR0Globals.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
341 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
342 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
343 )
344 {
345 int aRc[RTCPUSET_MAX_CPUS];
346 RTCPUID idCpu = 0;
347
348 /* We need to check if AMD-V has been properly initialized on all CPUs. Some BIOSes might do a poor job. */
349 memset(aRc, 0, sizeof(aRc));
350 rc = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);
351 AssertRC(rc);
352
353 /* Check the return code of all invocations. */
354 if (RT_SUCCESS(rc))
355 rc = hwaccmR0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
356
357 AssertMsgRC(rc, ("HWACCMR0InitCPU failed for cpu %d with rc=%d\n", idCpu, rc));
358
359 if (RT_SUCCESS(rc))
360 {
361 /* Query AMD features. */
362 ASMCpuId(0x8000000A, &HWACCMR0Globals.svm.u32Rev, &HWACCMR0Globals.uMaxASID, &u32Dummy, &HWACCMR0Globals.svm.u32Features);
363
364 HWACCMR0Globals.svm.fSupported = true;
365 }
366 else
367 HWACCMR0Globals.lLastError = rc;
368 }
369 else
370 HWACCMR0Globals.lLastError = VERR_SVM_NO_SVM;
371 }
372 else
373 HWACCMR0Globals.lLastError = VERR_HWACCM_UNKNOWN_CPU;
374 }
375 else
376 HWACCMR0Globals.lLastError = VERR_HWACCM_NO_CPUID;
377
378 if (HWACCMR0Globals.vmx.fSupported)
379 {
380 HWACCMR0Globals.pfnEnterSession = VMXR0Enter;
381 HWACCMR0Globals.pfnLeaveSession = VMXR0Leave;
382 HWACCMR0Globals.pfnSaveHostState = VMXR0SaveHostState;
383 HWACCMR0Globals.pfnLoadGuestState = VMXR0LoadGuestState;
384 HWACCMR0Globals.pfnRunGuestCode = VMXR0RunGuestCode;
385 HWACCMR0Globals.pfnEnableCpu = VMXR0EnableCpu;
386 HWACCMR0Globals.pfnDisableCpu = VMXR0DisableCpu;
387 HWACCMR0Globals.pfnInitVM = VMXR0InitVM;
388 HWACCMR0Globals.pfnTermVM = VMXR0TermVM;
389 HWACCMR0Globals.pfnSetupVM = VMXR0SetupVM;
390 }
391 else
392 if (HWACCMR0Globals.svm.fSupported)
393 {
394 HWACCMR0Globals.pfnEnterSession = SVMR0Enter;
395 HWACCMR0Globals.pfnLeaveSession = SVMR0Leave;
396 HWACCMR0Globals.pfnSaveHostState = SVMR0SaveHostState;
397 HWACCMR0Globals.pfnLoadGuestState = SVMR0LoadGuestState;
398 HWACCMR0Globals.pfnRunGuestCode = SVMR0RunGuestCode;
399 HWACCMR0Globals.pfnEnableCpu = SVMR0EnableCpu;
400 HWACCMR0Globals.pfnDisableCpu = SVMR0DisableCpu;
401 HWACCMR0Globals.pfnInitVM = SVMR0InitVM;
402 HWACCMR0Globals.pfnTermVM = SVMR0TermVM;
403 HWACCMR0Globals.pfnSetupVM = SVMR0SetupVM;
404 }
405
406 if (!HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
407 {
408 rc = RTPowerNotificationRegister(hwaccmR0PowerCallback, 0);
409 AssertRC(rc);
410 }
411
412 return VINF_SUCCESS;
413}
414
415
416/**
417 * Checks the error code array filled in for each cpu in the system.
418 *
419 * @returns VBox status code.
420 * @param paRc Error code array
421 * @param cErrorCodes Array size
422 * @param pidCpu Value of the first cpu that set an error (out)
423 */
424static int hwaccmR0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu)
425{
426 int rc = VINF_SUCCESS;
427
428 Assert(cErrorCodes == RTCPUSET_MAX_CPUS);
429
430 for (unsigned i = 0; i < cErrorCodes; i++)
431 {
432 if (RTMpIsCpuOnline(i))
433 {
434 if (RT_FAILURE(paRc[i]))
435 {
436 rc = paRc[i];
437 *pidCpu = i;
438 break;
439 }
440 }
441 }
442 return rc;
443}
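This helper closes the RTMpOnAll pattern used throughout the file: a worker callback records a per-CPU status in an array, and the caller condenses the array into a single status. A minimal sketch of the pattern, mirroring the calls in HWACCMR0Init and HWACCMR0EnableAllCpus (hypothetical standalone fragment, not part of this file):

/* Run a worker on every online CPU and report the first per-CPU failure. */
int     aRc[RTCPUSET_MAX_CPUS];
RTCPUID idCpu = 0;
memset(aRc, 0, sizeof(aRc));
int rc = RTMpOnAll(hwaccmR0EnableCPU, pVM, aRc);                  /* worker fills aRc[idCpu]    */
if (RT_SUCCESS(rc))
    rc = hwaccmR0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);  /* idCpu = first failing CPU  */
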
444
445/**
446 * Does global Ring-0 HWACCM termination.
447 *
448 * @returns VBox status code.
449 */
450VMMR0DECL(int) HWACCMR0Term(void)
451{
452 int rc;
453 if ( HWACCMR0Globals.vmx.fSupported
454 && HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
455 {
456 rc = SUPR0EnableVTx(false /* fEnable */);
457 for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); iCpu++)
458 {
459 HWACCMR0Globals.aCpuInfo[iCpu].fConfigured = false;
460 Assert(HWACCMR0Globals.aCpuInfo[iCpu].pMemObj == NIL_RTR0MEMOBJ);
461 }
462 }
463 else
464 {
465 int aRc[RTCPUSET_MAX_CPUS];
466
467 rc = RTPowerNotificationDeregister(hwaccmR0PowerCallback, 0);
468 Assert(RT_SUCCESS(rc));
469
470 memset(aRc, 0, sizeof(aRc));
471 rc = RTMpOnAll(hwaccmR0DisableCPU, aRc, NULL);
472 Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
473
474 /* Free the per-cpu pages used for VT-x and AMD-V */
475 for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
476 {
477 AssertMsgRC(aRc[i], ("hwaccmR0DisableCPU failed for cpu %d with rc=%d\n", i, aRc[i]));
478 if (HWACCMR0Globals.aCpuInfo[i].pMemObj != NIL_RTR0MEMOBJ)
479 {
480 RTR0MemObjFree(HWACCMR0Globals.aCpuInfo[i].pMemObj, false);
481 HWACCMR0Globals.aCpuInfo[i].pMemObj = NIL_RTR0MEMOBJ;
482 }
483 }
484 }
485 return rc;
486}
487
488
489/**
490 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
491 * is to be called on the target cpus.
492 *
493 * @param idCpu The identifier for the CPU the function is called on.
494 * @param pvUser1 The 1st user argument.
495 * @param pvUser2 The 2nd user argument.
496 */
497static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
498{
499 unsigned u32VendorEBX = (uintptr_t)pvUser1;
500 int *paRc = (int *)pvUser2;
501 uint64_t val;
502
503#ifdef LOG_ENABLED
504 SUPR0Printf("HWACCMR0InitCPU cpu %d\n", idCpu);
505#endif
506 Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
507
508 if (u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX)
509 {
510 val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
511
512 /*
513 * Both the LOCK and VMXON bit must be set; otherwise VMXON will generate a #GP.
514 * Once the lock bit is set, this MSR can no longer be modified.
515 */
516 if (!(val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK)))
517 {
518 /* MSR is not yet locked; we can change it ourselves here */
519 ASMWrMsr(MSR_IA32_FEATURE_CONTROL, HWACCMR0Globals.vmx.msr.feature_ctrl | MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK);
520 val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
521 }
522 if ( (val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
523 == (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
524 paRc[idCpu] = VINF_SUCCESS;
525 else
526 paRc[idCpu] = VERR_VMX_MSR_LOCKED_OR_DISABLED;
527 }
528 else
529 if (u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX)
530 {
531 /* Check if SVM is disabled */
532 val = ASMRdMsr(MSR_K8_VM_CR);
533 if (!(val & MSR_K8_VM_CR_SVM_DISABLE))
534 {
535 /* Turn on SVM in the EFER MSR. */
536 val = ASMRdMsr(MSR_K6_EFER);
537 if (!(val & MSR_K6_EFER_SVME))
538 ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);
539
540 /* Paranoia. */
541 val = ASMRdMsr(MSR_K6_EFER);
542 if (val & MSR_K6_EFER_SVME)
543 paRc[idCpu] = VINF_SUCCESS;
544 else
545 paRc[idCpu] = VERR_SVM_ILLEGAL_EFER_MSR;
546 }
547 else
548 paRc[idCpu] = HWACCMR0Globals.lLastError = VERR_SVM_DISABLED;
549 }
550 else
551 AssertFailed(); /* can't happen */
552 return;
553}
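The MSR_IA32_FEATURE_CONTROL handling above is the crux of VT-x detection: both the LOCK and the VMXON bit must end up set, and once LOCK is set the BIOS decision is final until reboot. A condensed, hypothetical restatement of that intent (not part of this file; it assumes the usual IA32_FEATURE_CONTROL layout behind the MSR_IA32_FEATURE_CONTROL_* constants):

/* Hypothetical helper: can VMXON be executed, given the feature control MSR value? */
static int hwaccmR0SketchCheckFeatureCtrl(uint64_t fFeatureCtrl)
{
    const uint64_t fNeeded = MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK;
    if ((fFeatureCtrl & fNeeded) == fNeeded)
        return VINF_SUCCESS;                      /* locked with VMXON enabled                        */
    if (!(fFeatureCtrl & MSR_IA32_FEATURE_CONTROL_LOCK))
        return VINF_SUCCESS;                      /* not locked yet: we may set VMXON + LOCK ourselves */
    return VERR_VMX_MSR_LOCKED_OR_DISABLED;       /* locked with VMXON disabled by the BIOS           */
}
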
554
555
556/**
557 * Sets up HWACCM on all cpus.
558 *
559 * @returns VBox status code.
560 * @param pVM The VM to operate on.
561 * @param enmNewHwAccmState New hwaccm state
562 *
563 */
564VMMR0DECL(int) HWACCMR0EnableAllCpus(PVM pVM, HWACCMSTATE enmNewHwAccmState)
565{
566 Assert(sizeof(HWACCMR0Globals.enmHwAccmState) == sizeof(uint32_t));
567
568 /* Make sure we don't touch hwaccm after we've disabled hwaccm in preparation of a suspend. */
569 if (ASMAtomicReadBool(&HWACCMR0Globals.fSuspended))
570 return VERR_HWACCM_SUSPEND_PENDING;
571
572 if (ASMAtomicCmpXchgU32((volatile uint32_t *)&HWACCMR0Globals.enmHwAccmState, enmNewHwAccmState, HWACCMSTATE_UNINITIALIZED))
573 {
574 int rc;
575
576 /* Don't set up hwaccm as that might not work (VT-x & 64-bit raw mode). */
577 if (enmNewHwAccmState == HWACCMSTATE_DISABLED)
578 return VINF_SUCCESS;
579
580 if ( HWACCMR0Globals.vmx.fSupported
581 && HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
582 {
583 rc = SUPR0EnableVTx(true /* fEnable */);
584 if (RT_SUCCESS(rc))
585 {
586 for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); iCpu++)
587 {
588 HWACCMR0Globals.aCpuInfo[iCpu].fConfigured = true;
589 Assert(HWACCMR0Globals.aCpuInfo[iCpu].pMemObj == NIL_RTR0MEMOBJ);
590 }
591 }
592 else
593 AssertMsgFailed(("HWACCMR0EnableAllCpus/SUPR0EnableVTx: rc=%Rrc\n", rc));
594 }
595 else
596 {
597 int aRc[RTCPUSET_MAX_CPUS];
598 RTCPUID idCpu = 0;
599
600 memset(aRc, 0, sizeof(aRc));
601
602 /* Allocate one page per cpu for the global vt-x and amd-v pages */
603 for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
604 {
605 Assert(!HWACCMR0Globals.aCpuInfo[i].pMemObj);
606
607 /** @todo this is rather dangerous if cpus can be taken offline; we don't care for now */
608 if (RTMpIsCpuOnline(i))
609 {
610 rc = RTR0MemObjAllocCont(&HWACCMR0Globals.aCpuInfo[i].pMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
611 AssertRC(rc);
612 if (RT_FAILURE(rc))
613 return rc;
614
615 void *pvR0 = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[i].pMemObj);
616 Assert(pvR0);
617 ASMMemZeroPage(pvR0);
618
619#ifdef LOG_ENABLED
620 SUPR0Printf("address %x phys %x\n", pvR0, (uint32_t)RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[i].pMemObj, 0));
621#endif
622 }
623 }
624 /* First time, so initialize each cpu/core */
625 rc = RTMpOnAll(hwaccmR0EnableCPU, (void *)pVM, aRc);
626
627 /* Check the return code of all invocations. */
628 if (RT_SUCCESS(rc))
629 rc = hwaccmR0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
630 AssertMsgRC(rc, ("HWACCMR0EnableAllCpus failed for cpu %d with rc=%d\n", idCpu, rc));
631 }
632
633 return rc;
634 }
635
636 if (HWACCMR0Globals.enmHwAccmState == enmNewHwAccmState)
637 return VINF_SUCCESS;
638
639 /* Request to change the mode is not allowed */
640 return VERR_ACCESS_DENIED;
641}
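A hypothetical caller (the real call site is in VMMR0, not in this file) enables hardware acceleration once, early on; the state latches, so a later call requesting a different state returns VERR_ACCESS_DENIED:

/* Sketch of a one-shot enable, assuming pVM is already set up. */
int rc = HWACCMR0EnableAllCpus(pVM, HWACCMSTATE_ENABLED);
if (RT_FAILURE(rc))
    return rc;   /* e.g. VERR_HWACCM_SUSPEND_PENDING while a host suspend is in progress */
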
642
643/**
644 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
645 * is to be called on the target cpus.
646 *
647 * @param idCpu The identifier for the CPU the function is called on.
648 * @param pvUser1 The 1st user argument.
649 * @param pvUser2 The 2nd user argument.
650 */
651static DECLCALLBACK(void) hwaccmR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
652{
653 PVM pVM = (PVM)pvUser1; /* can be NULL! */
654 int *paRc = (int *)pvUser2;
655 void *pvPageCpu;
656 RTHCPHYS pPageCpuPhys;
657 PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
658
659 Assert(!HWACCMR0Globals.vmx.fSupported || !HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx);
660 Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
661 Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));
662 Assert(!pCpu->fConfigured);
663 Assert(ASMAtomicReadBool(&pCpu->fInUse) == false);
664
665 pCpu->idCpu = idCpu;
666
667 /* Make sure we start with a clean TLB. */
668 pCpu->fFlushTLB = true;
669
670 pCpu->uCurrentASID = 0; /* we'll always increment this the first time (host uses ASID 0) */
671 pCpu->cTLBFlushes = 0;
672
673 /* Should never happen */
674 if (!pCpu->pMemObj)
675 {
676 AssertFailed();
677 paRc[idCpu] = VERR_INTERNAL_ERROR;
678 return;
679 }
680
681 pvPageCpu = RTR0MemObjAddress(pCpu->pMemObj);
682 pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);
683
684 paRc[idCpu] = HWACCMR0Globals.pfnEnableCpu(pCpu, pVM, pvPageCpu, pPageCpuPhys);
685 AssertRC(paRc[idCpu]);
686 if (RT_SUCCESS(paRc[idCpu]))
687 pCpu->fConfigured = true;
688
689 return;
690}
691
692/**
693 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
694 * is to be called on the target cpus.
695 *
696 * @param idCpu The identifier for the CPU the function is called on.
697 * @param pvUser1 The 1st user argument.
698 * @param pvUser2 The 2nd user argument.
699 */
700static DECLCALLBACK(void) hwaccmR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
701{
702 void *pvPageCpu;
703 RTHCPHYS pPageCpuPhys;
704 int *paRc = (int *)pvUser1;
705 PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
706
707 Assert(!HWACCMR0Globals.vmx.fSupported || !HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx);
708 Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
709 Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));
710 Assert(ASMAtomicReadBool(&pCpu->fInUse) == false);
711
712 if (!pCpu->pMemObj)
713 return;
714
715 pvPageCpu = RTR0MemObjAddress(pCpu->pMemObj);
716 pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);
717
718 if (pCpu->fConfigured)
719 {
720 paRc[idCpu] = HWACCMR0Globals.pfnDisableCpu(pCpu, pvPageCpu, pPageCpuPhys);
721 AssertRC(paRc[idCpu]);
722 pCpu->fConfigured = false;
723 }
724 else
725 paRc[idCpu] = VINF_SUCCESS; /* nothing to do */
726
727 pCpu->uCurrentASID = 0;
728 return;
729}
730
731/**
732 * Called whenever a system power state change occurs.
733 *
734 * @param enmEvent Power event
735 * @param pvUser User argument
736 */
737static DECLCALLBACK(void) hwaccmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvUser)
738{
739 NOREF(pvUser);
740 Assert(!HWACCMR0Globals.vmx.fSupported || !HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx);
741
742#ifdef LOG_ENABLED
743 if (enmEvent == RTPOWEREVENT_SUSPEND)
744 SUPR0Printf("hwaccmR0PowerCallback RTPOWEREVENT_SUSPEND\n");
745 else
746 SUPR0Printf("hwaccmR0PowerCallback RTPOWEREVENT_RESUME\n");
747#endif
748
749 if (enmEvent == RTPOWEREVENT_SUSPEND)
750 ASMAtomicWriteBool(&HWACCMR0Globals.fSuspended, true);
751
752 if (HWACCMR0Globals.enmHwAccmState == HWACCMSTATE_ENABLED)
753 {
754 int aRc[RTCPUSET_MAX_CPUS];
755 int rc;
756 RTCPUID idCpu;
757
758 memset(aRc, 0, sizeof(aRc));
759 if (enmEvent == RTPOWEREVENT_SUSPEND)
760 {
761 /* Turn off VT-x or AMD-V on all CPUs. */
762 rc = RTMpOnAll(hwaccmR0DisableCPU, aRc, NULL);
763 Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
764 }
765 else
766 {
767 /* Reinit the CPUs from scratch as the suspend state has messed with the MSRs. */
768 rc = RTMpOnAll(HWACCMR0InitCPU, (void *)((HWACCMR0Globals.vmx.fSupported) ? X86_CPUID_VENDOR_INTEL_EBX : X86_CPUID_VENDOR_AMD_EBX), aRc);
769 Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
770
771 if (RT_SUCCESS(rc))
772 rc = hwaccmR0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
773#ifdef LOG_ENABLED
774 if (RT_FAILURE(rc))
775 SUPR0Printf("hwaccmR0PowerCallback HWACCMR0InitCPU failed with %d\n", rc);
776#endif
777
778 /* Turn VT-x or AMD-V back on for all CPUs. */
779 rc = RTMpOnAll(hwaccmR0EnableCPU, NULL, aRc);
780 Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
781 }
782 }
783 if (enmEvent == RTPOWEREVENT_RESUME)
784 ASMAtomicWriteBool(&HWACCMR0Globals.fSuspended, false);
785}
786
787
788/**
789 * Does Ring-0 per VM HWACCM initialization.
790 *
791 * This is mainly to check that the Host CPU mode is compatible
792 * with VMX.
793 *
794 * @returns VBox status code.
795 * @param pVM The VM to operate on.
796 */
797VMMR0DECL(int) HWACCMR0InitVM(PVM pVM)
798{
799 int rc;
800 RTCPUID idCpu = RTMpCpuId();
801 PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
802
803 AssertReturn(pVM, VERR_INVALID_PARAMETER);
804
805#ifdef LOG_ENABLED
806 SUPR0Printf("HWACCMR0InitVM: %p\n", pVM);
807#endif
808
809 /* Make sure we don't touch hwaccm after we've disabled hwaccm in preparation of a suspend. */
810 if (ASMAtomicReadBool(&HWACCMR0Globals.fSuspended))
811 return VERR_HWACCM_SUSPEND_PENDING;
812
813 pVM->hwaccm.s.vmx.fSupported = HWACCMR0Globals.vmx.fSupported;
814 pVM->hwaccm.s.svm.fSupported = HWACCMR0Globals.svm.fSupported;
815
816 pVM->hwaccm.s.vmx.msr.feature_ctrl = HWACCMR0Globals.vmx.msr.feature_ctrl;
817 pVM->hwaccm.s.vmx.hostCR4 = HWACCMR0Globals.vmx.hostCR4;
818 pVM->hwaccm.s.vmx.msr.vmx_basic_info = HWACCMR0Globals.vmx.msr.vmx_basic_info;
819 pVM->hwaccm.s.vmx.msr.vmx_pin_ctls = HWACCMR0Globals.vmx.msr.vmx_pin_ctls;
820 pVM->hwaccm.s.vmx.msr.vmx_proc_ctls = HWACCMR0Globals.vmx.msr.vmx_proc_ctls;
821 pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2 = HWACCMR0Globals.vmx.msr.vmx_proc_ctls2;
822 pVM->hwaccm.s.vmx.msr.vmx_exit = HWACCMR0Globals.vmx.msr.vmx_exit;
823 pVM->hwaccm.s.vmx.msr.vmx_entry = HWACCMR0Globals.vmx.msr.vmx_entry;
824 pVM->hwaccm.s.vmx.msr.vmx_misc = HWACCMR0Globals.vmx.msr.vmx_misc;
825 pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0 = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0;
826 pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1 = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1;
827 pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0 = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0;
828 pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1 = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1;
829 pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum = HWACCMR0Globals.vmx.msr.vmx_vmcs_enum;
830 pVM->hwaccm.s.vmx.msr.vmx_eptcaps = HWACCMR0Globals.vmx.msr.vmx_eptcaps;
831 pVM->hwaccm.s.svm.u32Rev = HWACCMR0Globals.svm.u32Rev;
832 pVM->hwaccm.s.svm.u32Features = HWACCMR0Globals.svm.u32Features;
833 pVM->hwaccm.s.cpuid.u32AMDFeatureECX = HWACCMR0Globals.cpuid.u32AMDFeatureECX;
834 pVM->hwaccm.s.cpuid.u32AMDFeatureEDX = HWACCMR0Globals.cpuid.u32AMDFeatureEDX;
835 pVM->hwaccm.s.lLastError = HWACCMR0Globals.lLastError;
836
837 pVM->hwaccm.s.uMaxASID = HWACCMR0Globals.uMaxASID;
838
839 for (unsigned i = 0; i < pVM->cCPUs; i++)
840 {
841 PVMCPU pVCpu = &pVM->aCpus[i];
842
843 pVCpu->hwaccm.s.idEnteredCpu = NIL_RTCPUID;
844
845 /* Invalidate the last cpu we were running on. */
846 pVCpu->hwaccm.s.idLastCpu = NIL_RTCPUID;
847
848 /* we'll always increment this the first time (host uses ASID 0) */
849 pVCpu->hwaccm.s.uCurrentASID = 0;
850 }
851
852 ASMAtomicWriteBool(&pCpu->fInUse, true);
853
854 /* Init a VT-x or AMD-V VM. */
855 rc = HWACCMR0Globals.pfnInitVM(pVM);
856
857 ASMAtomicWriteBool(&pCpu->fInUse, false);
858
859 return rc;
860}
861
862
863/**
864 * Does Ring-0 per VM HWACCM termination.
865 *
866 * @returns VBox status code.
867 * @param pVM The VM to operate on.
868 */
869VMMR0DECL(int) HWACCMR0TermVM(PVM pVM)
870{
871 int rc;
872 RTCPUID idCpu = RTMpCpuId();
873 PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
874
875 AssertReturn(pVM, VERR_INVALID_PARAMETER);
876
877#ifdef LOG_ENABLED
878 SUPR0Printf("HWACCMR0TermVM: %p\n", pVM);
879#endif
880
881 /* Make sure we don't touch hwaccm after we've disabled hwaccm in preparation of a suspend. */
882 AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
883
884 ASMAtomicWriteBool(&pCpu->fInUse, true);
885
886 /* Terminate a VT-x or AMD-V VM. */
887 rc = HWACCMR0Globals.pfnTermVM(pVM);
888
889 ASMAtomicWriteBool(&pCpu->fInUse, false);
890 return rc;
891}
892
893
894/**
895 * Sets up a VT-x or AMD-V session
896 *
897 * @returns VBox status code.
898 * @param pVM The VM to operate on.
899 */
900VMMR0DECL(int) HWACCMR0SetupVM(PVM pVM)
901{
902 int rc;
903 RTCPUID idCpu = RTMpCpuId();
904 PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
905
906 AssertReturn(pVM, VERR_INVALID_PARAMETER);
907
908 /* Make sure we don't touch hwaccm after we've disabled hwaccm in preparation of a suspend. */
909 AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
910
911#ifdef LOG_ENABLED
912 SUPR0Printf("HWACCMR0SetupVM: %p\n", pVM);
913#endif
914
915 ASMAtomicWriteBool(&pCpu->fInUse, true);
916
917 for (unsigned i = 0; i < pVM->cCPUs; i++)
918 {
919 /* On first entry we'll sync everything. */
920 pVM->aCpus[i].hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
921 }
922
923 /* Setup VT-x or AMD-V. */
924 rc = HWACCMR0Globals.pfnSetupVM(pVM);
925
926 ASMAtomicWriteBool(&pCpu->fInUse, false);
927
928 return rc;
929}
930
931
932/**
933 * Enters the VT-x or AMD-V session
934 *
935 * @returns VBox status code.
936 * @param pVM The VM to operate on.
937 * @param pVCpu VMCPU handle.
938 */
939VMMR0DECL(int) HWACCMR0Enter(PVM pVM, PVMCPU pVCpu)
940{
941 PCPUMCTX pCtx;
942 int rc;
943 RTCPUID idCpu = RTMpCpuId();
944 PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
945
946 /* Make sure we can't enter a session after we've disabled hwaccm in preparation of a suspend. */
947 AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
948 ASMAtomicWriteBool(&pCpu->fInUse, true);
949
950 pCtx = CPUMQueryGuestCtxPtrEx(pVM, pVCpu);
951
952 /* Always load the guest's FPU/XMM state on-demand. */
953 CPUMDeactivateGuestFPUState(pVM);
954
955 /* Always load the guest's debug state on-demand. */
956 CPUMDeactivateGuestDebugState(pVM);
957
958 /* Always reload the host context and the guest's CR0 register. (!!!!) */
959 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_HOST_CONTEXT;
960
961 /* Setup the register and mask according to the current execution mode. */
962 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
963 pVM->hwaccm.s.u64RegisterMask = UINT64_C(0xFFFFFFFFFFFFFFFF);
964 else
965 pVM->hwaccm.s.u64RegisterMask = UINT64_C(0xFFFFFFFF);
966
967 rc = HWACCMR0Globals.pfnEnterSession(pVM, pVCpu, pCpu);
968 AssertRC(rc);
969 /* We must save the host context here (VT-x) as we might be rescheduled on a different cpu after a long jump back to ring 3. */
970 rc |= HWACCMR0Globals.pfnSaveHostState(pVM, pVCpu);
971 AssertRC(rc);
972 rc |= HWACCMR0Globals.pfnLoadGuestState(pVM, pVCpu, pCtx);
973 AssertRC(rc);
974
975 /* keep track of the CPU owning the VMCS for debugging scheduling weirdness and ring-3 calls. */
976 if (RT_SUCCESS(rc))
977 {
978 AssertMsg(pVCpu->hwaccm.s.idEnteredCpu == NIL_RTCPUID, ("%d", (int)pVCpu->hwaccm.s.idEnteredCpu));
979 pVCpu->hwaccm.s.idEnteredCpu = idCpu;
980
981#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
982 PGMDynMapMigrateAutoSet(pVCpu);
983#endif
984 }
985 return rc;
986}
987
988
989/**
990 * Leaves the VT-x or AMD-V session
991 *
992 * @returns VBox status code.
993 * @param pVM The VM to operate on.
994 * @param pVCpu VMCPU handle.
995 */
996VMMR0DECL(int) HWACCMR0Leave(PVM pVM, PVMCPU pVCpu)
997{
998 PCPUMCTX pCtx;
999 int rc;
1000 RTCPUID idCpu = RTMpCpuId();
1001 PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
1002
1003 AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
1004
1005 pCtx = CPUMQueryGuestCtxPtrEx(pVM, pVCpu);
1006
1007 /* Note: It's rather tricky with longjmps done by e.g. Log statements or the page fault handler.
1008 * We must restore the host FPU here to make absolutely sure we don't leave the guest FPU state active
1009 * or trash somebody else's FPU state.
1010 */
1011 /* Save the guest FPU and XMM state if necessary. */
1012 if (CPUMIsGuestFPUStateActive(pVCpu))
1013 {
1014 Log2(("CPUMR0SaveGuestFPU\n"));
1015 CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);
1016
1017 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
1018 }
1019
1020 rc = HWACCMR0Globals.pfnLeaveSession(pVM, pVCpu, pCtx);
1021
1022 /* keep track of the CPU owning the VMCS for debugging scheduling weirdness and ring-3 calls. */
1023#ifdef RT_STRICT
1024 if (RT_UNLIKELY( pVCpu->hwaccm.s.idEnteredCpu != idCpu
1025 && RT_FAILURE(rc)))
1026 {
1027 AssertMsgFailed(("Owner is %d, I'm %d", (int)pVCpu->hwaccm.s.idEnteredCpu, (int)idCpu));
1028 rc = VERR_INTERNAL_ERROR;
1029 }
1030#endif
1031 pVCpu->hwaccm.s.idEnteredCpu = NIL_RTCPUID;
1032
1033 ASMAtomicWriteBool(&pCpu->fInUse, false);
1034 return rc;
1035}
1036
1037/**
1038 * Runs guest code in a hardware accelerated VM.
1039 *
1040 * @returns VBox status code.
1041 * @param pVM The VM to operate on.
1042 * @param pVCpu VMCPU handle.
1043 */
1044VMMR0DECL(int) HWACCMR0RunGuestCode(PVM pVM, PVMCPU pVCpu)
1045{
1046 CPUMCTX *pCtx;
1047 RTCPUID idCpu = RTMpCpuId(); NOREF(idCpu);
1048 int rc;
1049#ifdef VBOX_STRICT
1050 PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
1051#endif
1052
1053 Assert(!VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL));
1054 Assert(HWACCMR0Globals.aCpuInfo[idCpu].fConfigured);
1055 AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
1056 Assert(ASMAtomicReadBool(&pCpu->fInUse) == true);
1057
1058#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1059 PGMDynMapStartAutoSet(pVCpu);
1060#endif
1061
1062 pCtx = CPUMQueryGuestCtxPtrEx(pVM, pVCpu);
1063
1064 rc = HWACCMR0Globals.pfnRunGuestCode(pVM, pVCpu, pCtx);
1065
1066#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1067 PGMDynMapReleaseAutoSet(pVCpu);
1068#endif
1069 return rc;
1070}
1071
1072
1073#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1074/**
1075 * Save guest FPU/XMM state (64-bit guest mode & 32-bit host only)
1076 *
1077 * @returns VBox status code.
1078 * @param pVM VM handle.
1079 * @param pVCpu VMCPU handle.
1080 * @param pCtx CPU context
1081 */
1082VMMR0DECL(int) HWACCMR0SaveFPUState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1083{
1084 if (pVM->hwaccm.s.vmx.fSupported)
1085 return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestFPU64, 0, NULL);
1086
1087 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestFPU64, 0, NULL);
1088}
1089
1090/**
1091 * Save guest debug state (64-bit guest mode & 32-bit host only)
1092 *
1093 * @returns VBox status code.
1094 * @param pVM VM handle.
1095 * @param pVCpu VMCPU handle.
1096 * @param pCtx CPU context
1097 */
1098VMMR0DECL(int) HWACCMR0SaveDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1099{
1100 if (pVM->hwaccm.s.vmx.fSupported)
1101 return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestDebug64, 0, NULL);
1102
1103 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestDebug64, 0, NULL);
1104}
1105
1106# ifdef DEBUG
1107/**
1108 * Test the 32->64-bit switcher
1109 *
1110 * @returns VBox status code.
1111 * @param pVM VM handle.
1112 */
1113VMMR0DECL(int) HWACCMR0TestSwitcher3264(PVM pVM)
1114{
1115 PVMCPU pVCpu = &pVM->aCpus[0];
1116 CPUMCTX *pCtx;
1117 uint32_t aParam[5] = {0, 1, 2, 3, 4};
1118
1119 pCtx = CPUMQueryGuestCtxPtrEx(pVM, pVCpu);
1120
1121 if (pVM->hwaccm.s.vmx.fSupported)
1122 return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnTest64, 5, &aParam[0]);
1123
1124 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnTest64, 5, &aParam[0]);
1125}
1126# endif
1127
1128#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
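These wrappers back the change noted in the file header (64-bit guest support on 32-bit hosts): when a 32-bit host has to save 64-bit guest FPU or debug state, it routes through the 32->64 switcher instead of touching the state directly. A hedged usage sketch, guarded the same way as the block above (hypothetical call site, not part of this file):

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    /* Hypothetical: a 32-bit host saving 64-bit guest FPU state via the switcher. */
    int rc = HWACCMR0SaveFPUState(pVM, pVCpu, pCtx);
    AssertRC(rc);
#endif
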
1129
1130/**
1131 * Returns the suspend status of the host.
1132 *
1133 * @returns Suspend pending or not
1134 */
1135VMMR0DECL(bool) HWACCMR0SuspendPending()
1136{
1137 return ASMAtomicReadBool(&HWACCMR0Globals.fSuspended);
1138}
1139
1140/**
1141 * Returns the cpu structure for the current cpu.
1142 * Keep in mind that there is no guarantee it will stay the same (long jumps to ring 3!!!).
1143 *
1144 * @returns cpu structure pointer
1145 */
1146VMMR0DECL(PHWACCM_CPUINFO) HWACCMR0GetCurrentCpu()
1147{
1148 RTCPUID idCpu = RTMpCpuId();
1149
1150 return &HWACCMR0Globals.aCpuInfo[idCpu];
1151}
1152
1153/**
1154 * Returns the cpu structure for the specified cpu.
1155 * Keep in mind that there is no guarantee it will stay the same (long jumps to ring 3!!!).
1156 *
1157 * @returns cpu structure pointer
1158 * @param idCpu id of the (host) cpu
1159 */
1160VMMR0DECL(PHWACCM_CPUINFO) HWACCMR0GetCurrentCpuEx(RTCPUID idCpu)
1161{
1162 return &HWACCMR0Globals.aCpuInfo[idCpu];
1163}
1164
1165#ifdef VBOX_STRICT
1166# include <iprt/string.h>
1167/**
1168 * Dumps a descriptor.
1169 *
1170 * @param pDesc Descriptor to dump.
1171 * @param Sel Selector number.
1172 * @param pszMsg Message to prepend the log entry with.
1173 */
1174VMMR0DECL(void) HWACCMR0DumpDescriptor(PX86DESCHC pDesc, RTSEL Sel, const char *pszMsg)
1175{
1176 /*
1177 * Make variable description string.
1178 */
1179 static struct
1180 {
1181 unsigned cch;
1182 const char *psz;
1183 } const aTypes[32] =
1184 {
1185# define STRENTRY(str) { sizeof(str) - 1, str }
1186
1187 /* system */
1188# if HC_ARCH_BITS == 64
1189 STRENTRY("Reserved0 "), /* 0x00 */
1190 STRENTRY("Reserved1 "), /* 0x01 */
1191 STRENTRY("LDT "), /* 0x02 */
1192 STRENTRY("Reserved3 "), /* 0x03 */
1193 STRENTRY("Reserved4 "), /* 0x04 */
1194 STRENTRY("Reserved5 "), /* 0x05 */
1195 STRENTRY("Reserved6 "), /* 0x06 */
1196 STRENTRY("Reserved7 "), /* 0x07 */
1197 STRENTRY("Reserved8 "), /* 0x08 */
1198 STRENTRY("TSS64Avail "), /* 0x09 */
1199 STRENTRY("ReservedA "), /* 0x0a */
1200 STRENTRY("TSS64Busy "), /* 0x0b */
1201 STRENTRY("Call64 "), /* 0x0c */
1202 STRENTRY("ReservedD "), /* 0x0d */
1203 STRENTRY("Int64 "), /* 0x0e */
1204 STRENTRY("Trap64 "), /* 0x0f */
1205# else
1206 STRENTRY("Reserved0 "), /* 0x00 */
1207 STRENTRY("TSS16Avail "), /* 0x01 */
1208 STRENTRY("LDT "), /* 0x02 */
1209 STRENTRY("TSS16Busy "), /* 0x03 */
1210 STRENTRY("Call16 "), /* 0x04 */
1211 STRENTRY("Task "), /* 0x05 */
1212 STRENTRY("Int16 "), /* 0x06 */
1213 STRENTRY("Trap16 "), /* 0x07 */
1214 STRENTRY("Reserved8 "), /* 0x08 */
1215 STRENTRY("TSS32Avail "), /* 0x09 */
1216 STRENTRY("ReservedA "), /* 0x0a */
1217 STRENTRY("TSS32Busy "), /* 0x0b */
1218 STRENTRY("Call32 "), /* 0x0c */
1219 STRENTRY("ReservedD "), /* 0x0d */
1220 STRENTRY("Int32 "), /* 0x0e */
1221 STRENTRY("Trap32 "), /* 0x0f */
1222# endif
1223 /* non system */
1224 STRENTRY("DataRO "), /* 0x10 */
1225 STRENTRY("DataRO Accessed "), /* 0x11 */
1226 STRENTRY("DataRW "), /* 0x12 */
1227 STRENTRY("DataRW Accessed "), /* 0x13 */
1228 STRENTRY("DataDownRO "), /* 0x14 */
1229 STRENTRY("DataDownRO Accessed "), /* 0x15 */
1230 STRENTRY("DataDownRW "), /* 0x16 */
1231 STRENTRY("DataDownRW Accessed "), /* 0x17 */
1232 STRENTRY("CodeEO "), /* 0x18 */
1233 STRENTRY("CodeEO Accessed "), /* 0x19 */
1234 STRENTRY("CodeER "), /* 0x1a */
1235 STRENTRY("CodeER Accessed "), /* 0x1b */
1236 STRENTRY("CodeConfEO "), /* 0x1c */
1237 STRENTRY("CodeConfEO Accessed "), /* 0x1d */
1238 STRENTRY("CodeConfER "), /* 0x1e */
1239 STRENTRY("CodeConfER Accessed ") /* 0x1f */
1240# undef STRENTRY
1241 };
1242# define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0)
1243 char szMsg[128];
1244 char *psz = &szMsg[0];
1245 unsigned i = pDesc->Gen.u1DescType << 4 | pDesc->Gen.u4Type;
1246 memcpy(psz, aTypes[i].psz, aTypes[i].cch);
1247 psz += aTypes[i].cch;
1248
1249 if (pDesc->Gen.u1Present)
1250 ADD_STR(psz, "Present ");
1251 else
1252 ADD_STR(psz, "Not-Present ");
1253# if HC_ARCH_BITS == 64
1254 if (pDesc->Gen.u1Long)
1255 ADD_STR(psz, "64-bit ");
1256 else
1257 ADD_STR(psz, "Comp ");
1258# else
1259 if (pDesc->Gen.u1Granularity)
1260 ADD_STR(psz, "Page ");
1261 if (pDesc->Gen.u1DefBig)
1262 ADD_STR(psz, "32-bit ");
1263 else
1264 ADD_STR(psz, "16-bit ");
1265# endif
1266# undef ADD_STR
1267 *psz = '\0';
1268
1269 /*
1270 * Limit and Base and format the output.
1271 */
1272 uint32_t u32Limit = X86DESC_LIMIT(*pDesc);
1273 if (pDesc->Gen.u1Granularity)
1274 u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK;
1275
1276# if HC_ARCH_BITS == 64
1277 uint64_t u32Base = X86DESC64_BASE(*pDesc);
1278
1279 Log(("%s %04x - %RX64 %RX64 - base=%RX64 limit=%08x dpl=%d %s\n", pszMsg,
1280 Sel, pDesc->au64[0], pDesc->au64[1], u32Base, u32Limit, pDesc->Gen.u2Dpl, szMsg));
1281# else
1282 uint32_t u32Base = X86DESC_BASE(*pDesc);
1283
1284 Log(("%s %04x - %08x %08x - base=%08x limit=%08x dpl=%d %s\n", pszMsg,
1285 Sel, pDesc->au32[0], pDesc->au32[1], u32Base, u32Limit, pDesc->Gen.u2Dpl, szMsg));
1286# endif
1287}
1288
1289/**
1290 * Formats a full register dump.
1291 *
1292 * @param pVM The VM to operate on.
1293 * @param pCtx The context to format.
1294 */
1295VMMR0DECL(void) HWACCMDumpRegs(PVM pVM, PCPUMCTX pCtx)
1296{
1297 /*
1298 * Format the flags.
1299 */
1300 static struct
1301 {
1302 const char *pszSet; const char *pszClear; uint32_t fFlag;
1303 } aFlags[] =
1304 {
1305 { "vip",NULL, X86_EFL_VIP },
1306 { "vif",NULL, X86_EFL_VIF },
1307 { "ac", NULL, X86_EFL_AC },
1308 { "vm", NULL, X86_EFL_VM },
1309 { "rf", NULL, X86_EFL_RF },
1310 { "nt", NULL, X86_EFL_NT },
1311 { "ov", "nv", X86_EFL_OF },
1312 { "dn", "up", X86_EFL_DF },
1313 { "ei", "di", X86_EFL_IF },
1314 { "tf", NULL, X86_EFL_TF },
1315 { "nt", "pl", X86_EFL_SF },
1316 { "nz", "zr", X86_EFL_ZF },
1317 { "ac", "na", X86_EFL_AF },
1318 { "po", "pe", X86_EFL_PF },
1319 { "cy", "nc", X86_EFL_CF },
1320 };
1321 char szEFlags[80];
1322 char *psz = szEFlags;
1323 uint32_t efl = pCtx->eflags.u32;
1324 for (unsigned i = 0; i < RT_ELEMENTS(aFlags); i++)
1325 {
1326 const char *pszAdd = aFlags[i].fFlag & efl ? aFlags[i].pszSet : aFlags[i].pszClear;
1327 if (pszAdd)
1328 {
1329 strcpy(psz, pszAdd);
1330 psz += strlen(pszAdd);
1331 *psz++ = ' ';
1332 }
1333 }
1334 psz[-1] = '\0';
1335
1336
1337 /*
1338 * Format the registers.
1339 */
1340 if (CPUMIsGuestIn64BitCode(pVM, CPUMCTX2CORE(pCtx)))
1341 {
1342 Log(("rax=%016RX64 rbx=%016RX64 rcx=%016RX64 rdx=%016RX64\n"
1343 "rsi=%016RX64 rdi=%016RX64 r8 =%016RX64 r9 =%016RX64\n"
1344 "r10=%016RX64 r11=%016RX64 r12=%016RX64 r13=%016RX64\n"
1345 "r14=%016RX64 r15=%016RX64\n"
1346 "rip=%016RX64 rsp=%016RX64 rbp=%016RX64 iopl=%d %*s\n"
1347 "cs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1348 "ds={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1349 "es={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1350 "fs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1351 "gs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1352 "ss={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1353 "cr0=%016RX64 cr2=%016RX64 cr3=%016RX64 cr4=%016RX64\n"
1354 "dr0=%016RX64 dr1=%016RX64 dr2=%016RX64 dr3=%016RX64\n"
1355 "dr4=%016RX64 dr5=%016RX64 dr6=%016RX64 dr7=%016RX64\n"
1356 "gdtr=%016RX64:%04x idtr=%016RX64:%04x eflags=%08x\n"
1357 "ldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1358 "tr ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1359 "SysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
1360 ,
1361 pCtx->rax, pCtx->rbx, pCtx->rcx, pCtx->rdx, pCtx->rsi, pCtx->rdi,
1362 pCtx->r8, pCtx->r9, pCtx->r10, pCtx->r11, pCtx->r12, pCtx->r13,
1363 pCtx->r14, pCtx->r15,
1364 pCtx->rip, pCtx->rsp, pCtx->rbp, X86_EFL_GET_IOPL(efl), 31, szEFlags,
1365 (RTSEL)pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u,
1366 (RTSEL)pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u,
1367 (RTSEL)pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u,
1368 (RTSEL)pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u,
1369 (RTSEL)pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u,
1370 (RTSEL)pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u,
1371 pCtx->cr0, pCtx->cr2, pCtx->cr3, pCtx->cr4,
1372 pCtx->dr[0], pCtx->dr[1], pCtx->dr[2], pCtx->dr[3],
1373 pCtx->dr[4], pCtx->dr[5], pCtx->dr[6], pCtx->dr[7],
1374 pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, efl,
1375 (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u64Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
1376 (RTSEL)pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
1377 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp));
1378 }
1379 else
1380 Log(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
1381 "eip=%08x esp=%08x ebp=%08x iopl=%d %*s\n"
1382 "cs={%04x base=%016RX64 limit=%08x flags=%08x} dr0=%08RX64 dr1=%08RX64\n"
1383 "ds={%04x base=%016RX64 limit=%08x flags=%08x} dr2=%08RX64 dr3=%08RX64\n"
1384 "es={%04x base=%016RX64 limit=%08x flags=%08x} dr4=%08RX64 dr5=%08RX64\n"
1385 "fs={%04x base=%016RX64 limit=%08x flags=%08x} dr6=%08RX64 dr7=%08RX64\n"
1386 "gs={%04x base=%016RX64 limit=%08x flags=%08x} cr0=%08RX64 cr2=%08RX64\n"
1387 "ss={%04x base=%016RX64 limit=%08x flags=%08x} cr3=%08RX64 cr4=%08RX64\n"
1388 "gdtr=%016RX64:%04x idtr=%016RX64:%04x eflags=%08x\n"
1389 "ldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1390 "tr ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1391 "SysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
1392 ,
1393 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
1394 pCtx->eip, pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), 31, szEFlags,
1395 (RTSEL)pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u, pCtx->dr[0], pCtx->dr[1],
1396 (RTSEL)pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u, pCtx->dr[2], pCtx->dr[3],
1397 (RTSEL)pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u, pCtx->dr[4], pCtx->dr[5],
1398 (RTSEL)pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u, pCtx->dr[6], pCtx->dr[7],
1399 (RTSEL)pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u, pCtx->cr0, pCtx->cr2,
1400 (RTSEL)pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u, pCtx->cr3, pCtx->cr4,
1401 pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, efl,
1402 (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u64Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
1403 (RTSEL)pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
1404 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp));
1405
1406 Log(("FPU:\n"
1407 "FCW=%04x FSW=%04x FTW=%02x\n"
1408 "res1=%02x FOP=%04x FPUIP=%08x CS=%04x Rsvrd1=%04x\n"
1409 "FPUDP=%04x DS=%04x Rsvrd2=%04x MXCSR=%08x MXCSR_MASK=%08x\n"
1410 ,
1411 pCtx->fpu.FCW, pCtx->fpu.FSW, pCtx->fpu.FTW,
1412 pCtx->fpu.huh1, pCtx->fpu.FOP, pCtx->fpu.FPUIP, pCtx->fpu.CS, pCtx->fpu.Rsvrd1,
1413 pCtx->fpu.FPUDP, pCtx->fpu.DS, pCtx->fpu.Rsrvd2,
1414 pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK));
1415
1416
1417 Log(("MSR:\n"
1418 "EFER =%016RX64\n"
1419 "PAT =%016RX64\n"
1420 "STAR =%016RX64\n"
1421 "CSTAR =%016RX64\n"
1422 "LSTAR =%016RX64\n"
1423 "SFMASK =%016RX64\n"
1424 "KERNELGSBASE =%016RX64\n",
1425 pCtx->msrEFER,
1426 pCtx->msrPAT,
1427 pCtx->msrSTAR,
1428 pCtx->msrCSTAR,
1429 pCtx->msrLSTAR,
1430 pCtx->msrSFMASK,
1431 pCtx->msrKERNELGSBASE));
1432
1433}
1434#endif /* VBOX_STRICT */
1435
1436/* Dummy callback handlers. */
1437VMMR0DECL(int) HWACCMR0DummyEnter(PVM pVM, PVMCPU pVCpu, PHWACCM_CPUINFO pCpu)
1438{
1439 return VINF_SUCCESS;
1440}
1441
1442VMMR0DECL(int) HWACCMR0DummyLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1443{
1444 return VINF_SUCCESS;
1445}
1446
1447VMMR0DECL(int) HWACCMR0DummyEnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
1448{
1449 return VINF_SUCCESS;
1450}
1451
1452VMMR0DECL(int) HWACCMR0DummyDisableCpu(PHWACCM_CPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
1453{
1454 return VINF_SUCCESS;
1455}
1456
1457VMMR0DECL(int) HWACCMR0DummyInitVM(PVM pVM)
1458{
1459 return VINF_SUCCESS;
1460}
1461
1462VMMR0DECL(int) HWACCMR0DummyTermVM(PVM pVM)
1463{
1464 return VINF_SUCCESS;
1465}
1466
1467VMMR0DECL(int) HWACCMR0DummySetupVM(PVM pVM)
1468{
1469 return VINF_SUCCESS;
1470}
1471
1472VMMR0DECL(int) HWACCMR0DummyRunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1473{
1474 return VINF_SUCCESS;
1475}
1476
1477VMMR0DECL(int) HWACCMR0DummySaveHostState(PVM pVM, PVMCPU pVCpu)
1478{
1479 return VINF_SUCCESS;
1480}
1481
1482VMMR0DECL(int) HWACCMR0DummyLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1483{
1484 return VINF_SUCCESS;
1485}