VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp@8863

Last change on this file since 8863 was 8853, checked in by vboxsync on 2008-05-15.

Manual page invalidation or TLB flush is required for AMD-V.

/* $Id: HWACCMR0.cpp 8853 2008-05-15 13:57:31Z vboxsync $ */
/** @file
 * HWACCM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HWACCM
#include <VBox/hwaccm.h>
#include "HWACCMInternal.h"
#include <VBox/vm.h>
#include <VBox/x86.h>
#include <VBox/hwacc_vmx.h>
#include <VBox/hwacc_svm.h>
#include <VBox/pgm.h>
#include <VBox/pdm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/selm.h>
#include <VBox/iom.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/memobj.h>
#include <iprt/cpuset.h>
#include "HWVMXR0.h"
#include "HWSVMR0.h"

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(void) HWACCMR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
static DECLCALLBACK(void) HWACCMR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
static int hwaccmr0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu);
/*******************************************************************************
*   Local Variables                                                            *
*******************************************************************************/
static struct
{
    struct
    {
        RTR0MEMOBJ  pMemObj;
        bool        fVMXConfigured;
        bool        fSVMConfigured;
    }               aCpuInfo[RTCPUSET_MAX_CPUS];

    struct
    {
        /** Set by the ring-0 driver to indicate VMX is supported by the CPU. */
        bool        fSupported;

        /** Host CR4 value (set by ring-0 VMX init) */
        uint64_t    hostCR4;

        /** VMX MSR values */
        struct
        {
            uint64_t feature_ctrl;
            uint64_t vmx_basic_info;
            uint64_t vmx_pin_ctls;
            uint64_t vmx_proc_ctls;
            uint64_t vmx_exit;
            uint64_t vmx_entry;
            uint64_t vmx_misc;
            uint64_t vmx_cr0_fixed0;
            uint64_t vmx_cr0_fixed1;
            uint64_t vmx_cr4_fixed0;
            uint64_t vmx_cr4_fixed1;
            uint64_t vmx_vmcs_enum;
        } msr;
        /* Last instruction error */
        uint32_t    ulLastInstrError;
    } vmx;
    struct
    {
        /** Set by the ring-0 driver to indicate SVM is supported by the CPU. */
        bool        fSupported;

        /** SVM revision. */
        uint32_t    u32Rev;

        /** Maximum ASID allowed. */
        uint32_t    u32MaxASID;
    } svm;
    /** Saved error from detection */
    int32_t         lLastError;

    struct
    {
        uint32_t    u32AMDFeatureECX;
        uint32_t    u32AMDFeatureEDX;
    } cpuid;

    HWACCMSTATE     enmHwAccmState;
} HWACCMR0Globals;


/**
 * Does global Ring-0 HWACCM initialization.
 *
 * @returns VBox status code.
 */
HWACCMR0DECL(int) HWACCMR0Init()
{
    int rc;

    memset(&HWACCMR0Globals, 0, sizeof(HWACCMR0Globals));
    HWACCMR0Globals.enmHwAccmState = HWACCMSTATE_UNINITIALIZED;

#ifndef VBOX_WITH_HYBIRD_32BIT_KERNEL /* paranoia */

    /*
     * Check for VT-x and AMD-V capabilities
     */
    if (ASMHasCpuId())
    {
        uint32_t u32FeaturesECX;
        uint32_t u32Dummy;
        uint32_t u32FeaturesEDX;
        uint32_t u32VendorEBX, u32VendorECX, u32VendorEDX;

        ASMCpuId(0, &u32Dummy, &u32VendorEBX, &u32VendorECX, &u32VendorEDX);
        ASMCpuId(1, &u32Dummy, &u32Dummy, &u32FeaturesECX, &u32FeaturesEDX);
        /* Query AMD features. */
        ASMCpuId(0x80000001, &u32Dummy, &u32Dummy, &HWACCMR0Globals.cpuid.u32AMDFeatureECX, &HWACCMR0Globals.cpuid.u32AMDFeatureEDX);

        if (    u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX
            &&  u32VendorECX == X86_CPUID_VENDOR_INTEL_ECX
            &&  u32VendorEDX == X86_CPUID_VENDOR_INTEL_EDX
           )
        {
            /*
             * Read all VMX MSRs if VMX is available. (same goes for RDMSR/WRMSR)
             * We also assume all VMX-enabled CPUs support fxsave/fxrstor.
             */
            if (    (u32FeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
                 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
                 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
               )
            {
                int     aRc[RTCPUSET_MAX_CPUS];
                RTCPUID idCpu = 0;

                HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

                /* We need to check if VT-x has been properly initialized on all CPUs. Some BIOSes do a lousy job. */
                memset(aRc, 0, sizeof(aRc));
                HWACCMR0Globals.lLastError = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);

                /* Check the return code of all invocations. */
                if (VBOX_SUCCESS(HWACCMR0Globals.lLastError))
                    HWACCMR0Globals.lLastError = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);

                if (VBOX_SUCCESS(HWACCMR0Globals.lLastError))
                {
                    /* Reread in case we've changed it. */
                    HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

                    if (    (HWACCMR0Globals.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
                         == (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
                    {
                        RTR0MEMOBJ pScratchMemObj;
                        void      *pvScratchPage;
                        RTHCPHYS   pScratchPagePhys;

                        HWACCMR0Globals.vmx.fSupported         = true;
                        HWACCMR0Globals.vmx.msr.vmx_basic_info = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
                        HWACCMR0Globals.vmx.msr.vmx_pin_ctls   = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_proc_ctls  = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_exit       = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_entry      = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_misc       = ASMRdMsr(MSR_IA32_VMX_MISC);
                        HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
                        HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
                        HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
                        HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
                        HWACCMR0Globals.vmx.msr.vmx_vmcs_enum  = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
                        HWACCMR0Globals.vmx.hostCR4            = ASMGetCR4();

                        rc = RTR0MemObjAllocCont(&pScratchMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
                        if (RT_FAILURE(rc))
                            return rc;

                        pvScratchPage    = RTR0MemObjAddress(pScratchMemObj);
                        pScratchPagePhys = RTR0MemObjGetPagePhysAddr(pScratchMemObj, 0);
                        memset(pvScratchPage, 0, PAGE_SIZE);

                        /* Set revision dword at the beginning of the structure. */
                        *(uint32_t *)pvScratchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(HWACCMR0Globals.vmx.msr.vmx_basic_info);

                        /* Make sure we don't get rescheduled to another cpu during this probe. */
                        RTCCUINTREG fFlags = ASMIntDisableFlags();

                        /*
                         * Check CR4.VMXE
                         */
                        if (!(HWACCMR0Globals.vmx.hostCR4 & X86_CR4_VMXE))
                        {
                            /* In theory this bit could be cleared behind our back. Which would cause #UD faults when we
                             * try to execute the VMX instructions...
                             */
                            ASMSetCR4(HWACCMR0Globals.vmx.hostCR4 | X86_CR4_VMXE);
                        }

                        /* Enter VMX Root Mode */
                        rc = VMXEnable(pScratchPagePhys);
                        if (VBOX_FAILURE(rc))
                        {
                            /* KVM leaves the CPU in VMX root mode. Not only is this not allowed, it will crash the host when we enter raw mode, because
                             * (a) clearing X86_CR4_VMXE in CR4 causes a #GP (we no longer modify this bit)
                             * (b) turning off paging causes a #GP (unavoidable when switching from long to 32 bits mode or 32 bits to PAE)
                             *
                             * They should fix their code, but until they do we simply refuse to run.
                             */
                            HWACCMR0Globals.lLastError = VERR_VMX_IN_VMX_ROOT_MODE;
                            HWACCMR0Globals.vmx.fSupported = false;
                        }
                        else
                            VMXDisable();

                        /* Restore CR4 again; don't leave the X86_CR4_VMXE flag set if it wasn't so before (some software could incorrectly think it's in VMX mode) */
                        ASMSetCR4(HWACCMR0Globals.vmx.hostCR4);
                        ASMSetFlags(fFlags);

                        RTR0MemObjFree(pScratchMemObj, false);
                        if (VBOX_FAILURE(HWACCMR0Globals.lLastError))
                            return HWACCMR0Globals.lLastError;
                    }
                    else
                    {
                        AssertFailed(); /* can't hit this case anymore */
                        HWACCMR0Globals.lLastError = VERR_VMX_ILLEGAL_FEATURE_CONTROL_MSR;
                    }
                }
#ifdef LOG_ENABLED
                else
                    SUPR0Printf("HWACCMR0InitCPU failed with rc=%d\n", HWACCMR0Globals.lLastError);
#endif
            }
            else
                HWACCMR0Globals.lLastError = VERR_VMX_NO_VMX;
        }
        else
        if (    u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX
            &&  u32VendorECX == X86_CPUID_VENDOR_AMD_ECX
            &&  u32VendorEDX == X86_CPUID_VENDOR_AMD_EDX
           )
        {
            /*
             * Read all SVM MSRs if SVM is available. (same goes for RDMSR/WRMSR)
             * We also assume all SVM-enabled CPUs support fxsave/fxrstor.
             */
            if (    (HWACCMR0Globals.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
                 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
                 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
               )
            {
                int     aRc[RTCPUSET_MAX_CPUS];
                RTCPUID idCpu = 0;

                /* We need to check if AMD-V has been properly initialized on all CPUs. Some BIOSes might do a poor job. */
                memset(aRc, 0, sizeof(aRc));
                rc = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);
                AssertRC(rc);

                /* Check the return code of all invocations. */
                if (VBOX_SUCCESS(rc))
                    rc = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);

                AssertMsg(VBOX_SUCCESS(rc), ("HWACCMR0InitCPU failed for cpu %d with rc=%d\n", idCpu, rc));

                if (VBOX_SUCCESS(rc))
                {
                    /* Query AMD features. */
                    ASMCpuId(0x8000000A, &HWACCMR0Globals.svm.u32Rev, &HWACCMR0Globals.svm.u32MaxASID, &u32Dummy, &u32Dummy);

                    HWACCMR0Globals.svm.fSupported = true;
                }
                else
                    HWACCMR0Globals.lLastError = rc;
            }
            else
                HWACCMR0Globals.lLastError = VERR_SVM_NO_SVM;
        }
        else
            HWACCMR0Globals.lLastError = VERR_HWACCM_UNKNOWN_CPU;
    }
    else
        HWACCMR0Globals.lLastError = VERR_HWACCM_NO_CPUID;

#endif /* !VBOX_WITH_HYBIRD_32BIT_KERNEL */

    return VINF_SUCCESS;
}


/**
 * Checks the error code array filled in for each cpu in the system.
 *
 * @returns VBox status code.
 * @param   paRc        Error code array
 * @param   cErrorCodes Array size
 * @param   pidCpu      Value of the first cpu that set an error (out)
 */
static int hwaccmr0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu)
{
    int rc = VINF_SUCCESS;

    Assert(cErrorCodes == RTCPUSET_MAX_CPUS);

    for (unsigned i=0;i<cErrorCodes;i++)
    {
        if (RTMpIsCpuOnline(i))
        {
            if (VBOX_FAILURE(paRc[i]))
            {
                rc = paRc[i];
                *pidCpu = i;
                break;
            }
        }
    }
    return rc;
}
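
/**
 * @remarks Usage sketch (illustrative; it simply mirrors the pattern used by
 *          HWACCMR0Init and HWACCMR0EnableAllCpus in this file): a worker
 *          broadcast with RTMpOnAll stores a status code into one array slot
 *          per cpu, and this helper then reports the first failure and the
 *          cpu it happened on.
 * @code
 *      int     aRc[RTCPUSET_MAX_CPUS];
 *      RTCPUID idCpu = 0;
 *
 *      memset(aRc, 0, sizeof(aRc));                 // one slot per possible cpu
 *      int rc = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);
 *      if (VBOX_SUCCESS(rc))                        // broadcast itself succeeded?
 *          rc = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
 * @endcode
 */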

/**
 * Does global Ring-0 HWACCM termination.
 *
 * @returns VBox status code.
 */
HWACCMR0DECL(int) HWACCMR0Term()
{
    int aRc[RTCPUSET_MAX_CPUS];

    memset(aRc, 0, sizeof(aRc));
    int rc = RTMpOnAll(HWACCMR0DisableCPU, aRc, NULL);
    Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);

    /* Free the per-cpu pages used for VT-x and AMD-V */
    for (unsigned i=0;i<RT_ELEMENTS(HWACCMR0Globals.aCpuInfo);i++)
    {
        AssertMsg(VBOX_SUCCESS(aRc[i]), ("HWACCMR0DisableCPU failed for cpu %d with rc=%d\n", i, aRc[i]));
        if (HWACCMR0Globals.aCpuInfo[i].pMemObj)
        {
            RTR0MemObjFree(HWACCMR0Globals.aCpuInfo[i].pMemObj, false);
            HWACCMR0Globals.aCpuInfo[i].pMemObj = NULL;
        }
    }
    return rc;
}


/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
 * is to be called on the target cpus.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument.
 * @param   pvUser2     The 2nd user argument.
 */
static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    unsigned u32VendorEBX = (uintptr_t)pvUser1;
    int     *paRc         = (int *)pvUser2;
    uint64_t val;

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0InitCPU cpu %d\n", idCpu);
#endif
    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)

    if (u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX)
    {
        val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

        /*
         * Both the LOCK and VMXON bit must be set; otherwise VMXON will generate a #GP.
         * Once the lock bit is set, this MSR can no longer be modified.
         */
        if (!(val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK)))
        {
            /* MSR is not yet locked; we can change it ourselves here */
            ASMWrMsr(MSR_IA32_FEATURE_CONTROL, HWACCMR0Globals.vmx.msr.feature_ctrl | MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK);
            val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
        }
        if (   (val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
            == (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
            paRc[idCpu] = VINF_SUCCESS;
        else
            paRc[idCpu] = VERR_VMX_MSR_LOCKED_OR_DISABLED;
    }
    else
    if (u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX)
    {
        /* Check if SVM is disabled */
        val = ASMRdMsr(MSR_K8_VM_CR);
        if (!(val & MSR_K8_VM_CR_SVM_DISABLE))
        {
            /* Turn on SVM in the EFER MSR. */
            val = ASMRdMsr(MSR_K6_EFER);
            if (!(val & MSR_K6_EFER_SVME))
                ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);

            /* Paranoia. */
            val = ASMRdMsr(MSR_K6_EFER);
            if (val & MSR_K6_EFER_SVME)
                paRc[idCpu] = VINF_SUCCESS;
            else
                paRc[idCpu] = VERR_SVM_ILLEGAL_EFER_MSR;
        }
        else
            paRc[idCpu] = HWACCMR0Globals.lLastError = VERR_SVM_DISABLED;
    }
    else
        AssertFailed(); /* can't happen */
    return;
}


/**
 * Sets up HWACCM on all cpus.
 *
 * @returns VBox status code.
 * @param   pVM                 The VM to operate on.
 * @param   enmNewHwAccmState   New hwaccm state
 */
HWACCMR0DECL(int) HWACCMR0EnableAllCpus(PVM pVM, HWACCMSTATE enmNewHwAccmState)
{
    Assert(sizeof(HWACCMR0Globals.enmHwAccmState) == sizeof(uint32_t));
    if (ASMAtomicCmpXchgU32((volatile uint32_t *)&HWACCMR0Globals.enmHwAccmState, enmNewHwAccmState, HWACCMSTATE_UNINITIALIZED))
    {
        int     aRc[RTCPUSET_MAX_CPUS];
        RTCPUID idCpu = 0;

        /* Don't setup hwaccm as that might not work (vt-x & 64 bits raw mode) */
        if (enmNewHwAccmState == HWACCMSTATE_DISABLED)
            return VINF_SUCCESS;

        memset(aRc, 0, sizeof(aRc));

        /* Allocate one page per cpu for the global vt-x and amd-v pages */
        for (unsigned i=0;i<RT_ELEMENTS(HWACCMR0Globals.aCpuInfo);i++)
        {
            Assert(!HWACCMR0Globals.aCpuInfo[i].pMemObj);

            /** @todo this is rather dangerous if cpus can be taken offline; we don't care for now */
            if (RTMpIsCpuOnline(i))
            {
                int rc = RTR0MemObjAllocCont(&HWACCMR0Globals.aCpuInfo[i].pMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
                AssertRC(rc);
                if (RT_FAILURE(rc))
                    return rc;

                void *pvR0 = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[i].pMemObj);
                Assert(pvR0);
                memset(pvR0, 0, PAGE_SIZE);

#ifdef LOG_ENABLED
                SUPR0Printf("address %x phys %x\n", pvR0, (uint32_t)RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[i].pMemObj, 0));
#endif
            }
        }
        /* First time, so initialize each cpu/core */
        int rc = RTMpOnAll(HWACCMR0EnableCPU, (void *)pVM, aRc);

        /* Check the return code of all invocations. */
        if (VBOX_SUCCESS(rc))
            rc = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);

        AssertMsg(VBOX_SUCCESS(rc), ("HWACCMR0EnableAllCpus failed for cpu %d with rc=%d\n", idCpu, rc));
        return rc;
    }

    if (HWACCMR0Globals.enmHwAccmState == enmNewHwAccmState)
        return VINF_SUCCESS;

    /* Request to change the mode is not allowed */
    return VERR_ACCESS_DENIED;
}
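
/**
 * @remarks Illustrative call order for the ring-0 entry points in this file
 *          (a sketch only; HWACCMSTATE_ENABLED is an assumed enum value, since
 *          only HWACCMSTATE_UNINITIALIZED and HWACCMSTATE_DISABLED appear
 *          here). The first VM to call HWACCMR0EnableAllCpus fixes the global
 *          mode for all later callers.
 * @code
 *      rc = HWACCMR0Init();                                   // once, global detection
 *      rc = HWACCMR0EnableAllCpus(pVM, HWACCMSTATE_ENABLED);  // enable VT-x/AMD-V per cpu
 *      rc = HWACCMR0InitVM(pVM);                              // copy detected caps into pVM
 *      rc = HWACCMR0SetupVM(pVM);                             // per-session VT-x/AMD-V setup
 *      // ... run the VM (see HWACCMR0Enter/RunGuestCode/Leave below) ...
 *      rc = HWACCMR0TermVM(pVM);
 *      rc = HWACCMR0Term();                                   // once, frees per-cpu pages
 * @endcode
 */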

/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
 * is to be called on the target cpus.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument.
 * @param   pvUser2     The 2nd user argument.
 */
static DECLCALLBACK(void) HWACCMR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PVM       pVM  = (PVM)pvUser1;
    int      *paRc = (int *)pvUser2;
    void     *pvPageCpu;
    RTHCPHYS  pPageCpuPhys;

    Assert(pVM);
    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
    Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));

    /* Should never happen */
    if (!HWACCMR0Globals.aCpuInfo[idCpu].pMemObj)
    {
        AssertFailed();
        return;
    }

    pvPageCpu    = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj);
    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj, 0);

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        paRc[idCpu] = VMXR0EnableCpu(idCpu, pVM, pvPageCpu, pPageCpuPhys);
        AssertRC(paRc[idCpu]);
        if (VBOX_SUCCESS(paRc[idCpu]))
            HWACCMR0Globals.aCpuInfo[idCpu].fVMXConfigured = true;
    }
    else
    if (pVM->hwaccm.s.svm.fSupported)
    {
        paRc[idCpu] = SVMR0EnableCpu(idCpu, pVM, pvPageCpu, pPageCpuPhys);
        AssertRC(paRc[idCpu]);
        if (VBOX_SUCCESS(paRc[idCpu]))
            HWACCMR0Globals.aCpuInfo[idCpu].fSVMConfigured = true;
    }
    return;
}

/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
 * is to be called on the target cpus.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument.
 * @param   pvUser2     The 2nd user argument.
 */
static DECLCALLBACK(void) HWACCMR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    void     *pvPageCpu;
    RTHCPHYS  pPageCpuPhys;
    int      *paRc = (int *)pvUser1;

    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
    Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));

    if (!HWACCMR0Globals.aCpuInfo[idCpu].pMemObj)
        return;

    pvPageCpu    = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj);
    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj, 0);

    if (HWACCMR0Globals.aCpuInfo[idCpu].fVMXConfigured)
    {
        paRc[idCpu] = VMXR0DisableCpu(idCpu, pvPageCpu, pPageCpuPhys);
        AssertRC(paRc[idCpu]);
        HWACCMR0Globals.aCpuInfo[idCpu].fVMXConfigured = false;
    }
    else
    if (HWACCMR0Globals.aCpuInfo[idCpu].fSVMConfigured)
    {
        paRc[idCpu] = SVMR0DisableCpu(idCpu, pvPageCpu, pPageCpuPhys);
        AssertRC(paRc[idCpu]);
        HWACCMR0Globals.aCpuInfo[idCpu].fSVMConfigured = false;
    }
    return;
}


/**
 * Does Ring-0 per VM HWACCM initialization.
 *
 * This is mainly to check that the Host CPU mode is compatible
 * with VMX.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0InitVM(PVM pVM)
{
    int rc = VINF_SUCCESS;

    AssertReturn(pVM, VERR_INVALID_PARAMETER);

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0InitVM: %p\n", pVM);
#endif

    pVM->hwaccm.s.vmx.fSupported         = HWACCMR0Globals.vmx.fSupported;
    pVM->hwaccm.s.svm.fSupported         = HWACCMR0Globals.svm.fSupported;

    pVM->hwaccm.s.vmx.msr.feature_ctrl   = HWACCMR0Globals.vmx.msr.feature_ctrl;
    pVM->hwaccm.s.vmx.hostCR4            = HWACCMR0Globals.vmx.hostCR4;
    pVM->hwaccm.s.vmx.msr.vmx_basic_info = HWACCMR0Globals.vmx.msr.vmx_basic_info;
    pVM->hwaccm.s.vmx.msr.vmx_pin_ctls   = HWACCMR0Globals.vmx.msr.vmx_pin_ctls;
    pVM->hwaccm.s.vmx.msr.vmx_proc_ctls  = HWACCMR0Globals.vmx.msr.vmx_proc_ctls;
    pVM->hwaccm.s.vmx.msr.vmx_exit       = HWACCMR0Globals.vmx.msr.vmx_exit;
    pVM->hwaccm.s.vmx.msr.vmx_entry      = HWACCMR0Globals.vmx.msr.vmx_entry;
    pVM->hwaccm.s.vmx.msr.vmx_misc       = HWACCMR0Globals.vmx.msr.vmx_misc;
    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0 = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0;
    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1 = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1;
    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0 = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0;
    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1 = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1;
    pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum  = HWACCMR0Globals.vmx.msr.vmx_vmcs_enum;
    pVM->hwaccm.s.svm.u32Rev             = HWACCMR0Globals.svm.u32Rev;
    pVM->hwaccm.s.svm.u32MaxASID         = HWACCMR0Globals.svm.u32MaxASID;
    pVM->hwaccm.s.cpuid.u32AMDFeatureECX = HWACCMR0Globals.cpuid.u32AMDFeatureECX;
    pVM->hwaccm.s.cpuid.u32AMDFeatureEDX = HWACCMR0Globals.cpuid.u32AMDFeatureEDX;
    pVM->hwaccm.s.lLastError             = HWACCMR0Globals.lLastError;

    /* Init a VT-x or AMD-V VM. */
    if (pVM->hwaccm.s.vmx.fSupported)
        rc = VMXR0InitVM(pVM);
    else
    if (pVM->hwaccm.s.svm.fSupported)
        rc = SVMR0InitVM(pVM);

    return rc;
}


/**
 * Does Ring-0 per VM HWACCM termination.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0TermVM(PVM pVM)
{
    int rc = VINF_SUCCESS;

    AssertReturn(pVM, VERR_INVALID_PARAMETER);

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0TermVM: %p\n", pVM);
#endif

    /* Terminate a VT-x or AMD-V VM. */
    if (pVM->hwaccm.s.vmx.fSupported)
        rc = VMXR0TermVM(pVM);
    else
    if (pVM->hwaccm.s.svm.fSupported)
        rc = SVMR0TermVM(pVM);

    return rc;
}


/**
 * Sets up a VT-x or AMD-V session
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0SetupVM(PVM pVM)
{
    int rc = VINF_SUCCESS;

    AssertReturn(pVM, VERR_INVALID_PARAMETER);

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0SetupVM: %p\n", pVM);
#endif

    /* Setup VT-x or AMD-V. */
    if (pVM->hwaccm.s.vmx.fSupported)
        rc = VMXR0SetupVM(pVM);
    else
    if (pVM->hwaccm.s.svm.fSupported)
        rc = SVMR0SetupVM(pVM);

    return rc;
}


/**
 * Enters the VT-x or AMD-V session
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0Enter(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    /* Always load the guest's FPU/XMM state on-demand. */
    CPUMDeactivateGuestFPUState(pVM);

    /* Always reload the host context and the guest's CR0 register. (!!!!) */
    pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_HOST_CONTEXT;

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        rc  = VMXR0Enter(pVM);
        AssertRC(rc);
        rc |= VMXR0SaveHostState(pVM);
        AssertRC(rc);
        rc |= VMXR0LoadGuestState(pVM, pCtx);
        AssertRC(rc);
        if (rc != VINF_SUCCESS)
            return rc;
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        rc  = SVMR0Enter(pVM);
        AssertRC(rc);
        rc |= SVMR0LoadGuestState(pVM, pCtx);
        AssertRC(rc);
        if (rc != VINF_SUCCESS)
            return rc;
    }
    return VINF_SUCCESS;
}


/**
 * Leaves the VT-x or AMD-V session
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0Leave(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    /** @note It's rather tricky with longjmps done by e.g. Log statements or the page fault handler. */
    /* We must restore the host FPU here to make absolutely sure we don't leave the guest FPU state active
     * or trash somebody else's FPU state.
     */

    /* Restore host FPU and XMM state if necessary. */
    if (CPUMIsGuestFPUStateActive(pVM))
    {
        Log2(("CPUMRestoreHostFPUState\n"));
        /** @note CPUMRestoreHostFPUState keeps the current CR0 intact. */
        CPUMRestoreHostFPUState(pVM);

        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
    }

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        return VMXR0Leave(pVM);
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        return SVMR0Leave(pVM);
    }
}

/**
 * Runs guest code in a hardware accelerated VM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0RunGuestCode(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        return VMXR0RunGuestCode(pVM, pCtx);
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        return SVMR0RunGuestCode(pVM, pCtx);
    }
}
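
/**
 * @remarks Illustrative per-world-switch flow (a sketch of how the three calls
 *          above pair up; error handling trimmed): host state is saved and
 *          guest state loaded on enter, and host FPU state is restored again
 *          on leave.
 * @code
 *      int rc = HWACCMR0Enter(pVM);        // save host, load guest state
 *      if (VBOX_SUCCESS(rc))
 *      {
 *          rc = HWACCMR0RunGuestCode(pVM); // VT-x or AMD-V world switch
 *          int rc2 = HWACCMR0Leave(pVM);   // restore host FPU state etc.
 *          AssertRC(rc2);
 *      }
 * @endcode
 */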

/**
 * Invalidates a guest page
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   GCVirt      Page to invalidate
 */
HWACCMR0DECL(int) HWACCMR0InvalidatePage(PVM pVM, RTGCPTR GCVirt)
{
    if (pVM->hwaccm.s.svm.fSupported)
        return SVMR0InvalidatePage(pVM, GCVirt);

    return VINF_SUCCESS;
}

/**
 * Flushes the guest TLB
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0FlushTLB(PVM pVM)
{
    if (pVM->hwaccm.s.svm.fSupported)
        return SVMR0FlushTLB(pVM);

    return VINF_SUCCESS;
}
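
/**
 * @remarks Sketch of the invalidation contract this changeset adds: per the
 *          change note at the top, only AMD-V needs manual page invalidation
 *          or a TLB flush, so on VT-x both entry points above are no-ops. A
 *          hypothetical caller that has just remapped a guest page might do:
 * @code
 *      rc = HWACCMR0InvalidatePage(pVM, GCPtrPage); // forwards to SVMR0InvalidatePage
 *      // ...or, after bulk mapping changes:
 *      rc = HWACCMR0FlushTLB(pVM);                  // forwards to SVMR0FlushTLB
 * @endcode
 *          (GCPtrPage is a hypothetical RTGCPTR naming the remapped page.)
 */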


#ifdef VBOX_STRICT
#include <iprt/string.h>
/**
 * Dumps a descriptor.
 *
 * @param   Desc    Descriptor to dump.
 * @param   Sel     Selector number.
 * @param   pszMsg  Message to prepend the log entry with.
 */
HWACCMR0DECL(void) HWACCMR0DumpDescriptor(PX86DESCHC Desc, RTSEL Sel, const char *pszMsg)
{
    /*
     * Make variable description string.
     */
    static struct
    {
        unsigned    cch;
        const char *psz;
    } const aTypes[32] =
    {
        #define STRENTRY(str) { sizeof(str) - 1, str }

        /* system */
#if HC_ARCH_BITS == 64
        STRENTRY("Reserved0 "),                  /* 0x00 */
        STRENTRY("Reserved1 "),                  /* 0x01 */
        STRENTRY("LDT "),                        /* 0x02 */
        STRENTRY("Reserved3 "),                  /* 0x03 */
        STRENTRY("Reserved4 "),                  /* 0x04 */
        STRENTRY("Reserved5 "),                  /* 0x05 */
        STRENTRY("Reserved6 "),                  /* 0x06 */
        STRENTRY("Reserved7 "),                  /* 0x07 */
        STRENTRY("Reserved8 "),                  /* 0x08 */
        STRENTRY("TSS64Avail "),                 /* 0x09 */
        STRENTRY("ReservedA "),                  /* 0x0a */
        STRENTRY("TSS64Busy "),                  /* 0x0b */
        STRENTRY("Call64 "),                     /* 0x0c */
        STRENTRY("ReservedD "),                  /* 0x0d */
        STRENTRY("Int64 "),                      /* 0x0e */
        STRENTRY("Trap64 "),                     /* 0x0f */
#else
        STRENTRY("Reserved0 "),                  /* 0x00 */
        STRENTRY("TSS16Avail "),                 /* 0x01 */
        STRENTRY("LDT "),                        /* 0x02 */
        STRENTRY("TSS16Busy "),                  /* 0x03 */
        STRENTRY("Call16 "),                     /* 0x04 */
        STRENTRY("Task "),                       /* 0x05 */
        STRENTRY("Int16 "),                      /* 0x06 */
        STRENTRY("Trap16 "),                     /* 0x07 */
        STRENTRY("Reserved8 "),                  /* 0x08 */
        STRENTRY("TSS32Avail "),                 /* 0x09 */
        STRENTRY("ReservedA "),                  /* 0x0a */
        STRENTRY("TSS32Busy "),                  /* 0x0b */
        STRENTRY("Call32 "),                     /* 0x0c */
        STRENTRY("ReservedD "),                  /* 0x0d */
        STRENTRY("Int32 "),                      /* 0x0e */
        STRENTRY("Trap32 "),                     /* 0x0f */
#endif
        /* non system */
        STRENTRY("DataRO "),                     /* 0x10 */
        STRENTRY("DataRO Accessed "),            /* 0x11 */
        STRENTRY("DataRW "),                     /* 0x12 */
        STRENTRY("DataRW Accessed "),            /* 0x13 */
        STRENTRY("DataDownRO "),                 /* 0x14 */
        STRENTRY("DataDownRO Accessed "),        /* 0x15 */
        STRENTRY("DataDownRW "),                 /* 0x16 */
        STRENTRY("DataDownRW Accessed "),        /* 0x17 */
        STRENTRY("CodeEO "),                     /* 0x18 */
        STRENTRY("CodeEO Accessed "),            /* 0x19 */
        STRENTRY("CodeER "),                     /* 0x1a */
        STRENTRY("CodeER Accessed "),            /* 0x1b */
        STRENTRY("CodeConfEO "),                 /* 0x1c */
        STRENTRY("CodeConfEO Accessed "),        /* 0x1d */
        STRENTRY("CodeConfER "),                 /* 0x1e */
        STRENTRY("CodeConfER Accessed ")         /* 0x1f */
        #undef STRENTRY
    };
    #define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0)
    char        szMsg[128];
    char       *psz = &szMsg[0];
    unsigned    i = Desc->Gen.u1DescType << 4 | Desc->Gen.u4Type;
    memcpy(psz, aTypes[i].psz, aTypes[i].cch);
    psz += aTypes[i].cch;

    if (Desc->Gen.u1Present)
        ADD_STR(psz, "Present ");
    else
        ADD_STR(psz, "Not-Present ");
#if HC_ARCH_BITS == 64
    if (Desc->Gen.u1Long)
        ADD_STR(psz, "64-bit ");
    else
        ADD_STR(psz, "Comp ");
#else
    if (Desc->Gen.u1Granularity)
        ADD_STR(psz, "Page ");
    if (Desc->Gen.u1DefBig)
        ADD_STR(psz, "32-bit ");
    else
        ADD_STR(psz, "16-bit ");
#endif
    #undef ADD_STR
    *psz = '\0';

    /*
     * Limit and Base and format the output.
     */
    uint32_t u32Limit = Desc->Gen.u4LimitHigh << 16 | Desc->Gen.u16LimitLow;
    if (Desc->Gen.u1Granularity)
        u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK;

#if HC_ARCH_BITS == 64
    uint64_t u32Base = ((uintptr_t)Desc->Gen.u32BaseHigh3 << 32ULL) | Desc->Gen.u8BaseHigh2 << 24ULL | Desc->Gen.u8BaseHigh1 << 16ULL | Desc->Gen.u16BaseLow;

    Log(("%s %04x - %VX64 %VX64 - base=%VX64 limit=%08x dpl=%d %s\n", pszMsg,
         Sel, Desc->au64[0], Desc->au64[1], u32Base, u32Limit, Desc->Gen.u2Dpl, szMsg));
#else
    uint32_t u32Base = Desc->Gen.u8BaseHigh2 << 24 | Desc->Gen.u8BaseHigh1 << 16 | Desc->Gen.u16BaseLow;

    Log(("%s %04x - %08x %08x - base=%08x limit=%08x dpl=%d %s\n", pszMsg,
         Sel, Desc->au32[0], Desc->au32[1], u32Base, u32Limit, Desc->Gen.u2Dpl, szMsg));
#endif
}

/**
 * Formats a full register dump.
 *
 * @param   pCtx        The context to format.
 */
HWACCMR0DECL(void) HWACCMDumpRegs(PCPUMCTX pCtx)
{
    /*
     * Format the flags.
     */
    static struct
    {
        const char *pszSet; const char *pszClear; uint32_t fFlag;
    } aFlags[] =
    {
        { "vip",NULL, X86_EFL_VIP },
        { "vif",NULL, X86_EFL_VIF },
        { "ac", NULL, X86_EFL_AC },
        { "vm", NULL, X86_EFL_VM },
        { "rf", NULL, X86_EFL_RF },
        { "nt", NULL, X86_EFL_NT },
        { "ov", "nv", X86_EFL_OF },
        { "dn", "up", X86_EFL_DF },
        { "ei", "di", X86_EFL_IF },
        { "tf", NULL, X86_EFL_TF },
        { "ng", "pl", X86_EFL_SF },
        { "nz", "zr", X86_EFL_ZF },
        { "ac", "na", X86_EFL_AF },
        { "po", "pe", X86_EFL_PF },
        { "cy", "nc", X86_EFL_CF },
    };
    char szEFlags[80];
    char *psz = szEFlags;
    uint32_t efl = pCtx->eflags.u32;
    for (unsigned i = 0; i < ELEMENTS(aFlags); i++)
    {
        const char *pszAdd = aFlags[i].fFlag & efl ? aFlags[i].pszSet : aFlags[i].pszClear;
        if (pszAdd)
        {
            strcpy(psz, pszAdd);
            psz += strlen(pszAdd);
            *psz++ = ' ';
        }
    }
    psz[-1] = '\0';

    /*
     * Format the registers.
     */
    Log(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
         "eip=%08x esp=%08x ebp=%08x iopl=%d %*s\n"
         "cs={%04x base=%08x limit=%08x flags=%08x} dr0=%08RX64 dr1=%08RX64\n"
         "ds={%04x base=%08x limit=%08x flags=%08x} dr2=%08RX64 dr3=%08RX64\n"
         "es={%04x base=%08x limit=%08x flags=%08x} dr4=%08RX64 dr5=%08RX64\n"
         "fs={%04x base=%08x limit=%08x flags=%08x} dr6=%08RX64 dr7=%08RX64\n"
         ,
         pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
         pCtx->eip, pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), 31, szEFlags,
         (RTSEL)pCtx->cs, pCtx->csHid.u32Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u, pCtx->dr0, pCtx->dr1,
         (RTSEL)pCtx->ds, pCtx->dsHid.u32Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u, pCtx->dr2, pCtx->dr3,
         (RTSEL)pCtx->es, pCtx->esHid.u32Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u, pCtx->dr4, pCtx->dr5,
         (RTSEL)pCtx->fs, pCtx->fsHid.u32Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u, pCtx->dr6, pCtx->dr7));

    Log(("gs={%04x base=%08x limit=%08x flags=%08x} cr0=%08RX64 cr2=%08RX64\n"
         "ss={%04x base=%08x limit=%08x flags=%08x} cr3=%08RX64 cr4=%08RX64\n"
         "gdtr=%08x:%04x idtr=%08x:%04x eflags=%08x\n"
         "ldtr={%04x base=%08x limit=%08x flags=%08x}\n"
         "tr  ={%04x base=%08x limit=%08x flags=%08x}\n"
         "SysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
         "FCW=%04x FSW=%04x FTW=%04x\n",
         (RTSEL)pCtx->gs, pCtx->gsHid.u32Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u, pCtx->cr0, pCtx->cr2,
         (RTSEL)pCtx->ss, pCtx->ssHid.u32Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u, pCtx->cr3, pCtx->cr4,
         pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, efl,
         (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u32Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
         (RTSEL)pCtx->tr, pCtx->trHid.u32Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
         pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
         pCtx->fpu.FCW, pCtx->fpu.FSW, pCtx->fpu.FTW));
}
#endif