VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp@ 52172

Last change on this file since 52172 was 49893, checked in by vboxsync, 11 years ago

MSR rewrite: initial hacking - half disabled.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 34.7 KB
1/* $Id: CPUMR0.cpp 49893 2013-12-13 00:40:20Z vboxsync $ */
2/** @file
3 * CPUM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include "CPUMInternal.h"
25#include <VBox/vmm/vm.h>
26#include <VBox/err.h>
27#include <VBox/log.h>
28#include <VBox/vmm/hm.h>
29#include <iprt/assert.h>
30#include <iprt/asm-amd64-x86.h>
31#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
32# include <iprt/mem.h>
33# include <iprt/memobj.h>
34# include <VBox/apic.h>
35#endif
36#include <iprt/x86.h>
37
38
39/*******************************************************************************
40* Structures and Typedefs *
41*******************************************************************************/
42#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
43/**
44 * Local APIC mappings.
45 */
46typedef struct CPUMHOSTLAPIC
47{
48 /** Indicates that the entry is in use and has valid data. */
49 bool fEnabled;
50 /** Whether it's operating in X2APIC mode (EXTD). */
51 bool fX2Apic;
52 /** The APIC version number. */
53 uint32_t uVersion;
54 /** Has APIC_REG_LVT_THMR. Not used. */
55 uint32_t fHasThermal;
56 /** The physical address of the APIC registers. */
57 RTHCPHYS PhysBase;
58 /** The memory object for the physical address (entered with RTR0MemObjEnterPhys). */
59 RTR0MEMOBJ hMemObj;
60 /** The mapping object for hMemObj. */
61 RTR0MEMOBJ hMapObj;
62 /** The mapping address of the APIC registers.
63 * @remarks Different CPUs may use the same physical address to map their
64 * APICs, so this pointer is only valid when on the CPU owning the
65 * APIC. */
66 void *pv;
67} CPUMHOSTLAPIC;
68#endif
69
70
71/*******************************************************************************
72* Global Variables *
73*******************************************************************************/
74#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
75static CPUMHOSTLAPIC g_aLApics[RTCPUSET_MAX_CPUS];
76#endif
77
78/**
79 * CPUID bits to unify among all cores.
80 */
81static struct
82{
83 uint32_t uLeaf; /**< Leaf to check. */
84 uint32_t ecx; /**< which bits in ecx to unify between CPUs. */
85 uint32_t edx; /**< which bits in edx to unify between CPUs. */
86}
87const g_aCpuidUnifyBits[] =
88{
89 {
90 0x00000001,
91 X86_CPUID_FEATURE_ECX_CX16 | X86_CPUID_FEATURE_ECX_MONITOR,
92 X86_CPUID_FEATURE_EDX_CX8
93 }
94};
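/* In effect, with the single entry above, cpumR0CheckCpuid() below intersects
   CMPXCHG16B (ECX.CX16), MONITOR/MWAIT (ECX.MONITOR) and CMPXCHG8B (EDX.CX8)
   for CPUID leaf 0x00000001 across all host CPUs, so the guest only sees a
   feature that every host CPU reports. */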
95
96
97
98/*******************************************************************************
99* Internal Functions *
100*******************************************************************************/
101#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
102static int cpumR0MapLocalApics(void);
103static void cpumR0UnmapLocalApics(void);
104#endif
105static int cpumR0SaveHostDebugState(PVMCPU pVCpu);
106
107
108/**
109 * Does the Ring-0 CPU initialization once during module load.
110 * XXX Host-CPU hot-plugging?
111 */
112VMMR0_INT_DECL(int) CPUMR0ModuleInit(void)
113{
114 int rc = VINF_SUCCESS;
115#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
116 rc = cpumR0MapLocalApics();
117#endif
118 return rc;
119}
120
121
122/**
123 * Terminate the module.
124 */
125VMMR0_INT_DECL(int) CPUMR0ModuleTerm(void)
126{
127#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
128 cpumR0UnmapLocalApics();
129#endif
130 return VINF_SUCCESS;
131}
132
133
134/**
135 *
136 *
137 * Check the CPUID features of this particular CPU and disable relevant features
138 * for the guest which do not exist on this CPU. We have seen systems where the
139 * X86_CPUID_FEATURE_ECX_MONITOR feature flag is only set on some host CPUs, see
140 * @bugref{5436}.
141 *
142 * @note This function might be called simultaneously on more than one CPU!
143 *
144 * @param idCpu The identifier for the CPU the function is called on.
145 * @param pvUser1 Pointer to the VM structure.
146 * @param pvUser2 Ignored.
147 */
148static DECLCALLBACK(void) cpumR0CheckCpuid(RTCPUID idCpu, void *pvUser1, void *pvUser2)
149{
150 PVM pVM = (PVM)pvUser1;
151 PCPUM pCPUM = &pVM->cpum.s;
152
153 NOREF(idCpu); NOREF(pvUser2);
154 for (uint32_t i = 0; i < RT_ELEMENTS(g_aCpuidUnifyBits); i++)
155 {
156 /* Note! Cannot use cpumCpuIdGetLeaf from here because we're not
157 necessarily in the VM process context. So, we're using the
158 legacy arrays as temporary storage. */
159
160 uint32_t uLeaf = g_aCpuidUnifyBits[i].uLeaf;
161 PCPUMCPUID pLegacyLeaf;
162 if (uLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
163 pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdStd[uLeaf];
164 else if (uLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
165 pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdExt[uLeaf - UINT32_C(0x80000000)];
166 else if (uLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
167 pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdCentaur[uLeaf - UINT32_C(0xc0000000)];
168 else
169 continue;
170
171 uint32_t eax, ebx, ecx, edx;
172 ASMCpuIdExSlow(uLeaf, 0, 0, 0, &eax, &ebx, &ecx, &edx);
173
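/* ANDing with (thisCpuValue | ~unifyMask) leaves the bits outside the unify
   mask untouched and clears a unified bit as soon as any host CPU lacks it. */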
174 ASMAtomicAndU32(&pLegacyLeaf->ecx, ecx | ~g_aCpuidUnifyBits[i].ecx);
175 ASMAtomicAndU32(&pLegacyLeaf->edx, edx | ~g_aCpuidUnifyBits[i].edx);
176 }
177}
178
179
180/**
181 * Does Ring-0 CPUM initialization.
182 *
183 * This is mainly to check that the Host CPU mode is compatible
184 * with VBox.
185 *
186 * @returns VBox status code.
187 * @param pVM Pointer to the VM.
188 */
189VMMR0_INT_DECL(int) CPUMR0InitVM(PVM pVM)
190{
191 LogFlow(("CPUMR0Init: %p\n", pVM));
192
193 /*
194 * Check CR0 & CR4 flags.
195 */
196 uint32_t u32CR0 = ASMGetCR0();
197 if ((u32CR0 & (X86_CR0_PE | X86_CR0_PG)) != (X86_CR0_PE | X86_CR0_PG)) /* a bit paranoid perhaps.. */
198 {
199 Log(("CPUMR0Init: PE or PG not set. cr0=%#x\n", u32CR0));
200 return VERR_UNSUPPORTED_CPU_MODE;
201 }
202
203 /*
204 * Check for sysenter and syscall usage.
205 */
206 if (ASMHasCpuId())
207 {
208 /*
209 * SYSENTER/SYSEXIT
210 *
211 * Intel docs claim you should test both the flag and family, model &
212 * stepping because some Pentium Pro CPUs have the SEP cpuid flag set,
213 * but don't support it. AMD CPUs may support this feature in legacy
214 * mode, but they've banned it from long mode. Since we switch to 32-bit
215 * mode when entering raw-mode context the feature would become
216 * accessible again on AMD CPUs, so we have to check regardless of
217 * host bitness.
218 */
219 uint32_t u32CpuVersion;
220 uint32_t u32Dummy;
221 uint32_t fFeatures;
222 ASMCpuId(1, &u32CpuVersion, &u32Dummy, &u32Dummy, &fFeatures);
223 uint32_t const u32Family = u32CpuVersion >> 8;
224 uint32_t const u32Model = (u32CpuVersion >> 4) & 0xF;
225 uint32_t const u32Stepping = u32CpuVersion & 0xF;
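/* This is the basic family/model/stepping decode of CPUID leaf 1 EAX; the
   extended family/model fields are not needed for this particular check. */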
226 if ( (fFeatures & X86_CPUID_FEATURE_EDX_SEP)
227 && ( u32Family != 6 /* (> pentium pro) */
228 || u32Model >= 3
229 || u32Stepping >= 3
230 || !ASMIsIntelCpu())
231 )
232 {
233 /*
234 * Read the MSR and see if it's in use or not.
235 */
236 uint32_t u32 = ASMRdMsr_Low(MSR_IA32_SYSENTER_CS);
237 if (u32)
238 {
239 pVM->cpum.s.fHostUseFlags |= CPUM_USE_SYSENTER;
240 Log(("CPUMR0Init: host uses sysenter cs=%08x%08x\n", ASMRdMsr_High(MSR_IA32_SYSENTER_CS), u32));
241 }
242 }
243
244 /*
245 * SYSCALL/SYSRET
246 *
247 * This feature is indicated by the SEP bit returned in EDX by CPUID
248 * function 0x80000001. Intel CPUs only support this feature in
249 * long mode. Since we're not running 64-bit guests in raw-mode there
250 * are no issues with 32-bit intel hosts.
251 */
252 uint32_t cExt = 0;
253 ASMCpuId(0x80000000, &cExt, &u32Dummy, &u32Dummy, &u32Dummy);
254 if (ASMIsValidExtRange(cExt))
255 {
256 uint32_t fExtFeaturesEDX = ASMCpuId_EDX(0x80000001);
257 if (fExtFeaturesEDX & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
258 {
259#ifdef RT_ARCH_X86
260# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
261 if (fExtFeaturesEDX & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
262# else
263 if (!ASMIsIntelCpu())
264# endif
265#endif
266 {
267 uint64_t fEfer = ASMRdMsr(MSR_K6_EFER);
268 if (fEfer & MSR_K6_EFER_SCE)
269 {
270 pVM->cpum.s.fHostUseFlags |= CPUM_USE_SYSCALL;
271 Log(("CPUMR0Init: host uses syscall\n"));
272 }
273 }
274 }
275 }
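/* Summary: CPUM_USE_SYSCALL is only recorded when EFER.SCE shows the host
   really has SYSCALL/SYSRET enabled; on plain 32-bit Intel hosts the EFER
   check is skipped entirely since SYSCALL is a long-mode feature there. */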
276
277 /*
278 * Unify/cross check some CPUID feature bits on all available CPU cores
279 * and threads. We've seen CPUs where the monitor support differed.
280 *
281 * Because the hyper heap isn't always mapped into ring-0, we cannot
282 * access it from a RTMpOnAll callback. We use the legacy CPUID arrays
283 * as temp ring-0 accessible memory instead, ASSUMING that they're all
284 * up to date when we get here.
285 */
286 RTMpOnAll(cpumR0CheckCpuid, pVM, NULL);
287
288 for (uint32_t i = 0; i < RT_ELEMENTS(g_aCpuidUnifyBits); i++)
289 {
290 uint32_t uLeaf = g_aCpuidUnifyBits[i].uLeaf;
291 PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, uLeaf, 0);
292 if (pLeaf)
293 {
294 PCPUMCPUID pLegacyLeaf;
295 if (uLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
296 pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdStd[uLeaf];
297 else if (uLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
298 pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdExt[uLeaf - UINT32_C(0x80000000)];
299 else if (uLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
300 pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdCentaur[uLeaf - UINT32_C(0xc0000000)];
301 else
302 continue;
303
304 pLeaf->uEcx = pLegacyLeaf->ecx;
305 pLeaf->uEdx = pLegacyLeaf->edx;
306 }
307 }
308
309 }
310
311
312 /*
313 * Check if debug registers are armed.
314 * This ASSUMES that DR7.GD is not set, or that it's handled transparently!
315 */
316 uint32_t u32DR7 = ASMGetDR7();
317 if (u32DR7 & X86_DR7_ENABLED_MASK)
318 {
319 for (VMCPUID i = 0; i < pVM->cCpus; i++)
320 pVM->aCpus[i].cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HOST;
321 Log(("CPUMR0Init: host uses debug registers (dr7=%x)\n", u32DR7));
322 }
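/* Flagging every VCPU with CPUM_USE_DEBUG_REGS_HOST records that the host has
   live hardware breakpoints, so later code knows the host DR0-7 must be
   preserved around guest execution. */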
323
324 return VINF_SUCCESS;
325}
326
327
328/**
329 * Trap handler for device-not-available fault (#NM).
330 * Device not available, FP or (F)WAIT instruction.
331 *
332 * @returns VBox status code.
333 * @retval VINF_SUCCESS if the guest FPU state is loaded.
334 * @retval VINF_EM_RAW_GUEST_TRAP if it is a guest trap.
335 *
336 * @param pVM Pointer to the VM.
337 * @param pVCpu Pointer to the VMCPU.
338 * @param pCtx Pointer to the guest-CPU context.
339 */
340VMMR0_INT_DECL(int) CPUMR0Trap07Handler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
341{
342 Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
343 Assert(ASMGetCR4() & X86_CR4_OSFSXR);
344
345 /* If the FPU state has already been loaded, then it's a guest trap. */
346 if (CPUMIsGuestFPUStateActive(pVCpu))
347 {
348 Assert( ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS))
349 || ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS | X86_CR0_EM)));
350 return VINF_EM_RAW_GUEST_TRAP;
351 }
352
353 /*
354 * There are two basic actions:
355 * 1. Save host fpu and restore guest fpu.
356 * 2. Generate guest trap.
357 *
358 * When entering the hypervisor we'll always enable MP (for proper wait
359 * trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag
360 * is taken from the guest OS in order to get proper SSE handling.
361 *
362 *
363 * Actions taken depending on the guest CR0 flags:
364 *
365 * (CR0 bit numbers: TS=3, EM=2, MP=1)
366 * TS | EM | MP | FPUInstr | WAIT :: VMM Action
367 * ------------------------------------------------------------------------
368 * 0 | 0 | 0 | Exec | Exec :: Clear TS & MP, Save HC, Load GC.
369 * 0 | 0 | 1 | Exec | Exec :: Clear TS, Save HC, Load GC.
370 * 0 | 1 | 0 | #NM | Exec :: Clear TS & MP, Save HC, Load GC.
371 * 0 | 1 | 1 | #NM | Exec :: Clear TS, Save HC, Load GC.
372 * 1 | 0 | 0 | #NM | Exec :: Clear MP, Save HC, Load GC. (EM is already cleared.)
373 * 1 | 0 | 1 | #NM | #NM :: Go to guest taking trap there.
374 * 1 | 1 | 0 | #NM | Exec :: Clear MP, Save HC, Load GC. (EM is already set.)
375 * 1 | 1 | 1 | #NM | #NM :: Go to guest taking trap there.
376 */
377
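/* Only the two table rows with both TS and MP set are forwarded to the guest;
   every other CR0 combination falls through to loading the guest FPU state. */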
378 switch (pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS))
379 {
380 case X86_CR0_MP | X86_CR0_TS:
381 case X86_CR0_MP | X86_CR0_TS | X86_CR0_EM:
382 return VINF_EM_RAW_GUEST_TRAP;
383 default:
384 break;
385 }
386
387 return CPUMR0LoadGuestFPU(pVM, pVCpu, pCtx);
388}
389
390
391/**
392 * Saves the host-FPU/XMM state and loads the guest-FPU state into the CPU.
393 *
394 * @returns VBox status code.
395 *
396 * @param pVM Pointer to the VM.
397 * @param pVCpu Pointer to the VMCPU.
398 * @param pCtx Pointer to the guest-CPU context.
399 */
400VMMR0_INT_DECL(int) CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
401{
402
403 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
404#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
405 if (CPUMIsGuestInLongModeEx(pCtx))
406 {
407 Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE));
408
409 /* Save the host state and record the fact (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM). */
410 cpumR0SaveHostFPUState(&pVCpu->cpum.s);
411
412 /* Restore the state on entry as we need to be in 64-bit mode to access the full state. */
413 pVCpu->cpum.s.fUseFlags |= CPUM_SYNC_FPU_STATE;
414 }
415 else
416#endif
417 {
418 NOREF(pCtx);
419 Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE));
420 /** @todo Move the FFXR handling down into
421 * cpumR0SaveHostRestoreGuestFPUState to optimize the
422 * VBOX_WITH_KERNEL_USING_XMM handling. */
423 /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
424 uint64_t uHostEfer = 0;
425 bool fRestoreEfer = false;
426 if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
427 {
428 uHostEfer = ASMRdMsr(MSR_K6_EFER);
429 if (uHostEfer & MSR_K6_EFER_FFXSR)
430 {
431 ASMWrMsr(MSR_K6_EFER, uHostEfer & ~MSR_K6_EFER_FFXSR);
432 pVCpu->cpum.s.fUseFlags |= CPUM_USED_MANUAL_XMM_RESTORE;
433 fRestoreEfer = true;
434 }
435 }
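/* Background: with EFER.FFXSR set, fxsave/fxrstor executed in 64-bit kernel
   mode skip the XMM register block (AMD's fast FXSAVE/FXRSTOR), which would
   break the lazy FPU switching done here; hence the temporary clearing above. */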
436
437 /* Do the job and record that we've switched FPU state. */
438 cpumR0SaveHostRestoreGuestFPUState(&pVCpu->cpum.s);
439
440 /* Restore EFER. */
441 if (fRestoreEfer)
442 ASMWrMsr(MSR_K6_EFER, uHostEfer);
443 }
444
445 Assert((pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)) == (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM));
446 return VINF_SUCCESS;
447}
448
449
450/**
451 * Saves the guest FPU/XMM state.
452 *
453 * @returns VBox status code.
454 * @param pVM Pointer to the VM.
455 * @param pVCpu Pointer to the VMCPU.
456 * @param pCtx Pointer to the guest CPU context.
457 */
458VMMR0_INT_DECL(int) CPUMR0SaveGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
459{
460 Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
461 Assert(ASMGetCR4() & X86_CR4_OSFSXR);
462 AssertReturn((pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU), VINF_SUCCESS);
463 NOREF(pVM); NOREF(pCtx);
464
465#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
466 if (CPUMIsGuestInLongModeEx(pCtx))
467 {
468 if (!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE))
469 {
470 HMR0SaveFPUState(pVM, pVCpu, pCtx);
471 cpumR0RestoreHostFPUState(&pVCpu->cpum.s);
472 }
473 /* else nothing to do; we didn't perform a world switch */
474 }
475 else
476#endif
477 {
478#ifdef VBOX_WITH_KERNEL_USING_XMM
479 /*
480 * We've already saved the XMM registers in the assembly wrapper, so
481 * we have to save them before saving the entire FPU state and put them
482 * back afterwards.
483 */
484 /** @todo This could be skipped if MSR_K6_EFER_FFXSR is set, but
485 * I'm not able to test such an optimization tonight.
486 * We could just do all this in assembly. */
487 uint128_t aGuestXmmRegs[16];
488 memcpy(&aGuestXmmRegs[0], &pVCpu->cpum.s.Guest.fpu.aXMM[0], sizeof(aGuestXmmRegs));
489#endif
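/* Presumably the fxsave in cpumR0SaveGuestRestoreHostFPUState() below would
   overwrite the guest XMM values already captured by the assembly wrapper with
   whatever the kernel left in the XMM registers, hence the copy-aside above
   and the copy-back further down. */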
490
491 /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
492 uint64_t uHostEfer = 0;
493 bool fRestoreEfer = false;
494 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
495 {
496 uHostEfer = ASMRdMsr(MSR_K6_EFER);
497 if (uHostEfer & MSR_K6_EFER_FFXSR)
498 {
499 ASMWrMsr(MSR_K6_EFER, uHostEfer & ~MSR_K6_EFER_FFXSR);
500 fRestoreEfer = true;
501 }
502 }
503
504 cpumR0SaveGuestRestoreHostFPUState(&pVCpu->cpum.s);
505
506 /* Restore EFER MSR */
507 if (fRestoreEfer)
508 ASMWrMsr(MSR_K6_EFER, uHostEfer | MSR_K6_EFER_FFXSR);
509
510#ifdef VBOX_WITH_KERNEL_USING_XMM
511 memcpy(&pVCpu->cpum.s.Guest.fpu.aXMM[0], &aGuestXmmRegs[0], sizeof(aGuestXmmRegs));
512#endif
513 }
514
515 pVCpu->cpum.s.fUseFlags &= ~(CPUM_USED_FPU | CPUM_SYNC_FPU_STATE | CPUM_USED_MANUAL_XMM_RESTORE);
516 return VINF_SUCCESS;
517}
518
519
520/**
521 * Saves the host debug state, setting CPUM_USED_HOST_DEBUG_STATE and loading
522 * DR7 with safe values.
523 *
524 * @returns VBox status code.
525 * @param pVCpu Pointer to the VMCPU.
526 */
527static int cpumR0SaveHostDebugState(PVMCPU pVCpu)
528{
529 /*
530 * Save the host state.
531 */
532#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
533 AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
534 cpumR0SaveDRx(&pVCpu->cpum.s.Host.dr0);
535#else
536 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
537 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
538 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
539 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
540#endif
541 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
542 /** @todo dr7 might already have been changed to 0x400; don't care right now as it's harmless. */
543 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
544
545 /* Preemption paranoia. */
546 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HOST);
547
548 /*
549 * Make sure DR7 is harmless or else we could trigger breakpoints when
550 * load guest or hypervisor DRx values later.
551 */
552 if (pVCpu->cpum.s.Host.dr7 != X86_DR7_INIT_VAL)
553 ASMSetDR7(X86_DR7_INIT_VAL);
554
555 return VINF_SUCCESS;
556}
557
558
559/**
560 * Saves the guest DRx state residing in host registers and restore the host
561 * register values.
562 *
563 * The guest DRx state is only saved if CPUMR0LoadGuestDebugState was called,
564 * since it's assumed that we're shadowing the guest DRx register values
565 * accurately when using the combined hypervisor debug register values
566 * (CPUMR0LoadHyperDebugState).
567 *
568 * @returns true if either guest or hypervisor debug registers were loaded.
569 * @param pVCpu The cross context CPU structure for the calling EMT.
570 * @param fDr6 Whether to include DR6 or not.
571 * @thread EMT(pVCpu)
572 */
573VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu, bool fDr6)
574{
575 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
576 bool const fDrXLoaded = RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER));
577
578 /*
579 * Do we need to save the guest DRx registers loaded into host registers?
580 * (DR7 and DR6 (if fDr6 is true) are left to the caller.)
581 */
582 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST)
583 {
584#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
585 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
586 {
587 uint64_t uDr6 = pVCpu->cpum.s.Guest.dr[6];
588 HMR0SaveDebugState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
589 if (!fDr6)
590 pVCpu->cpum.s.Guest.dr[6] = uDr6;
591 }
592 else
593#endif
594 {
595#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
596 cpumR0SaveDRx(&pVCpu->cpum.s.Guest.dr[0]);
597#else
598 pVCpu->cpum.s.Guest.dr[0] = ASMGetDR0();
599 pVCpu->cpum.s.Guest.dr[1] = ASMGetDR1();
600 pVCpu->cpum.s.Guest.dr[2] = ASMGetDR2();
601 pVCpu->cpum.s.Guest.dr[3] = ASMGetDR3();
602#endif
603 if (fDr6)
604 pVCpu->cpum.s.Guest.dr[6] = ASMGetDR6();
605 }
606 }
607 ASMAtomicAndU32(&pVCpu->cpum.s.fUseFlags, ~( CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER
608 | CPUM_SYNC_DEBUG_REGS_GUEST | CPUM_SYNC_DEBUG_REGS_HYPER));
609
610 /*
611 * Restore the host's debug state. DR0-3, DR6 and only then DR7!
612 */
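/* DR7 is deliberately written last, so no breakpoint can fire before DR0-3
   and DR6 hold the host values again. */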
613 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST)
614 {
615 /* A bit of paranoia first... */
616 uint64_t uCurDR7 = ASMGetDR7();
617 if (uCurDR7 != X86_DR7_INIT_VAL)
618 ASMSetDR7(X86_DR7_INIT_VAL);
619
620#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
621 AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
622 cpumR0LoadDRx(&pVCpu->cpum.s.Host.dr0);
623#else
624 ASMSetDR0(pVCpu->cpum.s.Host.dr0);
625 ASMSetDR1(pVCpu->cpum.s.Host.dr1);
626 ASMSetDR2(pVCpu->cpum.s.Host.dr2);
627 ASMSetDR3(pVCpu->cpum.s.Host.dr3);
628#endif
629 /** @todo consider only updating if they differ, esp. DR6. Need to figure out how
630 * expensive DRx reads are over DRx writes. */
631 ASMSetDR6(pVCpu->cpum.s.Host.dr6);
632 ASMSetDR7(pVCpu->cpum.s.Host.dr7);
633
634 ASMAtomicAndU32(&pVCpu->cpum.s.fUseFlags, ~CPUM_USED_DEBUG_REGS_HOST);
635 }
636
637 return fDrXLoaded;
638}
639
640
641/**
642 * Saves the guest DRx state if it resides in host registers.
643 *
644 * This does NOT clear any use flags, so the host registers remain loaded with
645 * the guest DRx state upon return. The purpose is only to make sure the values
646 * in the CPU context structure are up to date.
647 *
648 * @returns true if the host registers contains guest values, false if not.
649 * @param pVCpu The cross context CPU structure for the calling EMT.
650 * @param fDr6 Whether to include DR6 or not.
651 * @thread EMT(pVCpu)
652 */
653VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuest(PVMCPU pVCpu, bool fDr6)
654{
655 /*
656 * Do we need to save the guest DRx registers loaded into host registers?
657 * (DR7 and DR6 (if fDr6 is true) are left to the caller.)
658 */
659 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST)
660 {
661#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
662 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
663 {
664 uint64_t uDr6 = pVCpu->cpum.s.Guest.dr[6];
665 HMR0SaveDebugState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
666 if (!fDr6)
667 pVCpu->cpum.s.Guest.dr[6] = uDr6;
668 }
669 else
670#endif
671 {
672#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
673 cpumR0SaveDRx(&pVCpu->cpum.s.Guest.dr[0]);
674#else
675 pVCpu->cpum.s.Guest.dr[0] = ASMGetDR0();
676 pVCpu->cpum.s.Guest.dr[1] = ASMGetDR1();
677 pVCpu->cpum.s.Guest.dr[2] = ASMGetDR2();
678 pVCpu->cpum.s.Guest.dr[3] = ASMGetDR3();
679#endif
680 if (fDr6)
681 pVCpu->cpum.s.Guest.dr[6] = ASMGetDR6();
682 }
683 return true;
684 }
685 return false;
686}
687
688
689/**
690 * Lazily sync in the debug state.
691 *
692 * @param pVCpu The cross context CPU structure for the calling EMT.
693 * @param fDr6 Whether to include DR6 or not.
694 * @thread EMT(pVCpu)
695 */
696VMMR0_INT_DECL(void) CPUMR0LoadGuestDebugState(PVMCPU pVCpu, bool fDr6)
697{
698 /*
699 * Save the host state and disarm all host BPs.
700 */
701 cpumR0SaveHostDebugState(pVCpu);
702 Assert(ASMGetDR7() == X86_DR7_INIT_VAL);
703
704 /*
705 * Activate the guest state DR0-3.
706 * DR7 and DR6 (if fDr6 is true) are left to the caller.
707 */
708#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
709 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
710 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_SYNC_DEBUG_REGS_GUEST); /* Postpone it to the world switch. */
711 else
712#endif
713 {
714#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
715 cpumR0LoadDRx(&pVCpu->cpum.s.Guest.dr[0]);
716#else
717 ASMSetDR0(pVCpu->cpum.s.Guest.dr[0]);
718 ASMSetDR1(pVCpu->cpum.s.Guest.dr[1]);
719 ASMSetDR2(pVCpu->cpum.s.Guest.dr[2]);
720 ASMSetDR3(pVCpu->cpum.s.Guest.dr[3]);
721#endif
722 if (fDr6)
723 ASMSetDR6(pVCpu->cpum.s.Guest.dr[6]);
724
725 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_GUEST);
726 }
727}
728
729
730/**
731 * Lazily sync in the hypervisor debug state.
732 *
733 *
734 * @param pVCpu The cross context CPU structure for the calling EMT.
735 * @param fDr6 Whether to include DR6 or not.
736 * @thread EMT(pVCpu)
737 */
738VMMR0_INT_DECL(void) CPUMR0LoadHyperDebugState(PVMCPU pVCpu, bool fDr6)
739{
740 /*
741 * Save the host state and disarm all host BPs.
742 */
743 cpumR0SaveHostDebugState(pVCpu);
744 Assert(ASMGetDR7() == X86_DR7_INIT_VAL);
745
746 /*
747 * Make sure the hypervisor values are up to date.
748 */
749 CPUMRecalcHyperDRx(pVCpu, UINT8_MAX /* no loading, please */, true);
750
751 /*
752 * Activate the hypervisor state DR0-3.
753 * DR7 and DR6 (if fDr6 is true) are left to the caller.
754 */
755#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
756 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
757 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_SYNC_DEBUG_REGS_HYPER); /* Postpone it. */
758 else
759#endif
760 {
761#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
762 cpumR0LoadDRx(&pVCpu->cpum.s.Hyper.dr[0]);
763#else
764 ASMSetDR0(pVCpu->cpum.s.Hyper.dr[0]);
765 ASMSetDR1(pVCpu->cpum.s.Hyper.dr[1]);
766 ASMSetDR2(pVCpu->cpum.s.Hyper.dr[2]);
767 ASMSetDR3(pVCpu->cpum.s.Hyper.dr[3]);
768#endif
769 if (fDr6)
770 ASMSetDR6(X86_DR6_INIT_VAL);
771
772 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HYPER);
773 }
774}
775
776#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
777
778/**
779 * Per-CPU callback that probes the CPU for APIC support.
780 *
781 * @param idCpu The identifier for the CPU the function is called on.
782 * @param pvUser1 Ignored.
783 * @param pvUser2 Ignored.
784 */
785static DECLCALLBACK(void) cpumR0MapLocalApicCpuProber(RTCPUID idCpu, void *pvUser1, void *pvUser2)
786{
787 NOREF(pvUser1); NOREF(pvUser2);
788 int iCpu = RTMpCpuIdToSetIndex(idCpu);
789 AssertReturnVoid(iCpu >= 0 && (unsigned)iCpu < RT_ELEMENTS(g_aLApics));
790
791 /*
792 * Check for APIC support.
793 */
794 uint32_t uMaxLeaf, u32EBX, u32ECX, u32EDX;
795 ASMCpuId(0, &uMaxLeaf, &u32EBX, &u32ECX, &u32EDX);
796 if ( ( ASMIsIntelCpuEx(u32EBX, u32ECX, u32EDX)
797 || ASMIsAmdCpuEx(u32EBX, u32ECX, u32EDX)
798 || ASMIsViaCentaurCpuEx(u32EBX, u32ECX, u32EDX))
799 && ASMIsValidStdRange(uMaxLeaf))
800 {
801 uint32_t uDummy;
802 ASMCpuId(1, &uDummy, &u32EBX, &u32ECX, &u32EDX);
803 if ( (u32EDX & X86_CPUID_FEATURE_EDX_APIC)
804 && (u32EDX & X86_CPUID_FEATURE_EDX_MSR))
805 {
806 /*
807 * Safe to access the MSR. Read it and calc the BASE (a little complicated).
808 */
809 uint64_t u64ApicBase = ASMRdMsr(MSR_IA32_APICBASE);
810 uint64_t u64Mask = MSR_IA32_APICBASE_BASE_MIN;
811
812 /* see Intel Manual: Local APIC Status and Location: MAXPHYADDR default is bit 36 */
813 uint32_t uMaxExtLeaf;
814 ASMCpuId(0x80000000, &uMaxExtLeaf, &u32EBX, &u32ECX, &u32EDX);
815 if ( uMaxExtLeaf >= UINT32_C(0x80000008)
816 && ASMIsValidExtRange(uMaxExtLeaf))
817 {
818 uint32_t u32PhysBits;
819 ASMCpuId(0x80000008, &u32PhysBits, &u32EBX, &u32ECX, &u32EDX);
820 u32PhysBits &= 0xff;
821 u64Mask = ((UINT64_C(1) << u32PhysBits) - 1) & UINT64_C(0xfffffffffffff000);
822 }
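/* Example: with 36 physical address bits the mask becomes
   ((1 << 36) - 1) & ~0xfff = 0x0000000ffffff000, i.e. a page-aligned base. */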
823
824 AssertCompile(sizeof(g_aLApics[iCpu].PhysBase) == sizeof(u64ApicBase));
825 g_aLApics[iCpu].PhysBase = u64ApicBase & u64Mask;
826 g_aLApics[iCpu].fEnabled = RT_BOOL(u64ApicBase & MSR_IA32_APICBASE_EN);
827 g_aLApics[iCpu].fX2Apic = (u64ApicBase & (MSR_IA32_APICBASE_EXTD | MSR_IA32_APICBASE_EN))
828 == (MSR_IA32_APICBASE_EXTD | MSR_IA32_APICBASE_EN);
829 }
830 }
831}
832
833
834
835/**
836 * Per-CPU callback that verifies our APIC expectations.
837 *
838 * @param idCpu The identifier for the CPU the function is called on.
839 * @param pvUser1 Ignored.
840 * @param pvUser2 Ignored.
841 */
842static DECLCALLBACK(void) cpumR0MapLocalApicCpuChecker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
843{
844 NOREF(pvUser1); NOREF(pvUser2);
845
846 int iCpu = RTMpCpuIdToSetIndex(idCpu);
847 AssertReturnVoid(iCpu >= 0 && (unsigned)iCpu < RT_ELEMENTS(g_aLApics));
848 if (!g_aLApics[iCpu].fEnabled)
849 return;
850
851 /*
852 * 0x0X 82489 external APIC
853 * 0x1X Local APIC
854 * 0x2X..0xFF reserved
855 */
856 uint32_t uApicVersion;
857 if (g_aLApics[iCpu].fX2Apic)
858 uApicVersion = ApicX2RegRead32(APIC_REG_VERSION);
859 else
860 uApicVersion = ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_VERSION);
861 if ((APIC_REG_VERSION_GET_VER(uApicVersion) & 0xF0) == 0x10)
862 {
863 g_aLApics[iCpu].uVersion = uApicVersion;
864 g_aLApics[iCpu].fHasThermal = APIC_REG_VERSION_GET_MAX_LVT(uApicVersion) >= 5;
865
866#if 0 /* enable if you need it. */
867 if (g_aLApics[iCpu].fX2Apic)
868 SUPR0Printf("CPUM: X2APIC %02u - ver %#010x, lint0=%#07x lint1=%#07x pc=%#07x thmr=%#07x\n",
869 iCpu, uApicVersion,
870 ApicX2RegRead32(APIC_REG_LVT_LINT0), ApicX2RegRead32(APIC_REG_LVT_LINT1),
871 ApicX2RegRead32(APIC_REG_LVT_PC), ApicX2RegRead32(APIC_REG_LVT_THMR) );
872 else
873 SUPR0Printf("CPUM: APIC %02u at %RGp (mapped at %p) - ver %#010x, lint0=%#07x lint1=%#07x pc=%#07x thmr=%#07x\n",
874 iCpu, g_aLApics[iCpu].PhysBase, g_aLApics[iCpu].pv, uApicVersion,
875 ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_LINT0), ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_LINT1),
876 ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_PC), ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_THMR) );
877#endif
878 }
879 else
880 {
881 g_aLApics[iCpu].fEnabled = false;
882 g_aLApics[iCpu].fX2Apic = false;
883 SUPR0Printf("VBox/CPUM: Unsupported APIC version %#x (iCpu=%d)\n", uApicVersion, iCpu);
884 }
885}
886
887
888/**
889 * Map the MMIO page of each local APIC in the system.
890 */
891static int cpumR0MapLocalApics(void)
892{
893 /*
894 * Check that we'll always stay within the array bounds.
895 */
896 if (RTMpGetArraySize() > RT_ELEMENTS(g_aLApics))
897 {
898 LogRel(("CPUM: Too many real CPUs/cores/threads - %u, max %u\n", RTMpGetArraySize(), RT_ELEMENTS(g_aLApics)));
899 return VERR_TOO_MANY_CPUS;
900 }
901
902 /*
903 * Create mappings for all online CPUs we think have legacy APICs.
904 */
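/* CPUs already in x2APIC mode are skipped below: their APIC registers are
   accessed through MSRs, so no MMIO page has to be entered and mapped. */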
905 int rc = RTMpOnAll(cpumR0MapLocalApicCpuProber, NULL, NULL);
906
907 for (unsigned iCpu = 0; RT_SUCCESS(rc) && iCpu < RT_ELEMENTS(g_aLApics); iCpu++)
908 {
909 if (g_aLApics[iCpu].fEnabled && !g_aLApics[iCpu].fX2Apic)
910 {
911 rc = RTR0MemObjEnterPhys(&g_aLApics[iCpu].hMemObj, g_aLApics[iCpu].PhysBase,
912 PAGE_SIZE, RTMEM_CACHE_POLICY_MMIO);
913 if (RT_SUCCESS(rc))
914 {
915 rc = RTR0MemObjMapKernel(&g_aLApics[iCpu].hMapObj, g_aLApics[iCpu].hMemObj, (void *)-1,
916 PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
917 if (RT_SUCCESS(rc))
918 {
919 g_aLApics[iCpu].pv = RTR0MemObjAddress(g_aLApics[iCpu].hMapObj);
920 continue;
921 }
922 RTR0MemObjFree(g_aLApics[iCpu].hMemObj, true /* fFreeMappings */);
923 }
924 g_aLApics[iCpu].fEnabled = false;
925 }
926 g_aLApics[iCpu].pv = NULL;
927 }
928
929 /*
930 * Check the APICs.
931 */
932 if (RT_SUCCESS(rc))
933 rc = RTMpOnAll(cpumR0MapLocalApicCpuChecker, NULL, NULL);
934
935 if (RT_FAILURE(rc))
936 {
937 cpumR0UnmapLocalApics();
938 return rc;
939 }
940
941#ifdef LOG_ENABLED
942 /*
943 * Log the result (pretty useless, requires enabling CPUM in VBoxDrv
944 * and !VBOX_WITH_R0_LOGGING).
945 */
946 if (LogIsEnabled())
947 {
948 uint32_t cEnabled = 0;
949 uint32_t cX2Apics = 0;
950 for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(g_aLApics); iCpu++)
951 if (g_aLApics[iCpu].fEnabled)
952 {
953 cEnabled++;
954 cX2Apics += g_aLApics[iCpu].fX2Apic;
955 }
956 Log(("CPUM: %u APICs, %u X2APICs\n", cEnabled, cX2Apics));
957 }
958#endif
959
960 return VINF_SUCCESS;
961}
962
963
964/**
965 * Unmap the Local APIC of all host CPUs.
966 */
967static void cpumR0UnmapLocalApics(void)
968{
969 for (unsigned iCpu = RT_ELEMENTS(g_aLApics); iCpu-- > 0;)
970 {
971 if (g_aLApics[iCpu].pv)
972 {
973 RTR0MemObjFree(g_aLApics[iCpu].hMapObj, true /* fFreeMappings */);
974 RTR0MemObjFree(g_aLApics[iCpu].hMemObj, true /* fFreeMappings */);
975 g_aLApics[iCpu].hMapObj = NIL_RTR0MEMOBJ;
976 g_aLApics[iCpu].hMemObj = NIL_RTR0MEMOBJ;
977 g_aLApics[iCpu].fEnabled = false;
978 g_aLApics[iCpu].fX2Apic = false;
979 g_aLApics[iCpu].pv = NULL;
980 }
981 }
982}
983
984
985/**
986 * Updates CPUMCPU::pvApicBase and CPUMCPU::fX2Apic prior to world switch.
987 *
988 * Writes the Local APIC mapping address of the current host CPU to CPUMCPU so
989 * the world switchers can access the APIC registers for the purpose of
990 * disabling and re-enabling the NMIs. Must be called with disabled preemption
991 * or disabled interrupts!
992 *
993 * @param pVCpu Pointer to the cross context CPU structure of the
994 * calling EMT.
995 * @param idHostCpu The ID of the current host CPU.
996 */
997VMMR0_INT_DECL(void) CPUMR0SetLApic(PVMCPU pVCpu, RTCPUID idHostCpu)
998{
999 int idxCpu = RTMpCpuIdToSetIndex(idHostCpu);
1000 pVCpu->cpum.s.pvApicBase = g_aLApics[idxCpu].pv;
1001 pVCpu->cpum.s.fX2Apic = g_aLApics[idxCpu].fX2Apic;
1002// Log6(("CPUMR0SetLApic: pvApicBase=%p fX2Apic=%d\n", g_aLApics[idxCpu].pv, g_aLApics[idxCpu].fX2Apic));
1003}
1004
1005#endif /* VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI */
1006