VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp @ 33935

Last change on this file since 33935 was 33935, checked in by vboxsync, 14 years ago

VMM: mask all Local APIC interrupt vectors which are set up to NMI mode during world switch (raw mode only)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 25.7 KB
/* $Id: CPUMR0.cpp 33935 2010-11-10 15:37:02Z vboxsync $ */
/** @file
 * CPUM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_CPUM
#include <VBox/cpum.h>
#include "CPUMInternal.h"
#include <VBox/vm.h>
#include <VBox/x86.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/hwaccm.h>
#include <iprt/assert.h>
#include <iprt/asm-amd64-x86.h>
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
# include <iprt/mem.h>
# include <iprt/memobj.h>
# include <VBox/apic.h>
#endif


#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
/** Local APIC mappings */
typedef struct
{
    bool        fEnabled;
    uint64_t    PhysBase;
    RTR0MEMOBJ  hMemObj;
    RTR0MEMOBJ  hMapObj;
    void       *pv;
    uint32_t    fHasThermal;
} CPUMHOSTLAPIC;

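/** Local APIC info for each host CPU, indexed by RTMpCpuIdToSetIndex() of the host CPU id. */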
static CPUMHOSTLAPIC g_aLApics[RTCPUSET_MAX_CPUS];
static int  cpumR0MapLocalApics(void);
static void cpumR0UnmapLocalApics(void);
#endif

/**
 * Does the Ring-0 CPU initialization once during module load.
 * XXX Host-CPU hot-plugging?
 */
VMMR0DECL(int) CPUMR0ModuleInit(void)
{
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    return cpumR0MapLocalApics();
#else
    return VINF_SUCCESS; /* nothing to map when the local APIC NMI workaround isn't compiled in */
#endif
}


/**
 * Terminate the module.
 */
VMMR0DECL(int) CPUMR0ModuleTerm(void)
{
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    cpumR0UnmapLocalApics();
#endif
    return VINF_SUCCESS;
}


/**
 * Does Ring-0 CPUM initialization.
 *
 * This is mainly to check that the Host CPU mode is compatible
 * with VBox.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR0DECL(int) CPUMR0Init(PVM pVM)
{
    LogFlow(("CPUMR0Init: %p\n", pVM));

#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    for (unsigned i = 0; i < RT_ELEMENTS(g_aLApics); i++)
        if (g_aLApics[i].pv)
            SUPR0Printf(" CPU%d: %llx => %llx\n", i, g_aLApics[i].PhysBase, (uint64_t)g_aLApics[i].pv);
#endif

    /*
     * Check CR0 & CR4 flags.
     */
    uint32_t u32CR0 = ASMGetCR0();
    if ((u32CR0 & (X86_CR0_PE | X86_CR0_PG)) != (X86_CR0_PE | X86_CR0_PG)) /* a bit paranoid perhaps.. */
    {
        Log(("CPUMR0Init: PE or PG not set. cr0=%#x\n", u32CR0));
        return VERR_UNSUPPORTED_CPU_MODE;
    }

    /*
     * Check for sysenter and syscall usage.
     */
    if (ASMHasCpuId())
    {
        /*
         * SYSENTER/SYSEXIT
         *
         * Intel docs claim you should test both the flag and family, model &
         * stepping because some Pentium Pro CPUs have the SEP cpuid flag set,
         * but don't support it.  AMD CPUs may support this feature in legacy
         * mode, but they've banned it from long mode.  Since we switch to 32-bit
         * mode when entering raw-mode context the feature would become
         * accessible again on AMD CPUs, so we have to check regardless of
         * host bitness.
         */
        uint32_t u32CpuVersion;
        uint32_t u32Dummy;
        uint32_t fFeatures;
        ASMCpuId(1, &u32CpuVersion, &u32Dummy, &u32Dummy, &fFeatures);
        uint32_t u32Family   = u32CpuVersion >> 8;
        uint32_t u32Model    = (u32CpuVersion >> 4) & 0xF;
        uint32_t u32Stepping = u32CpuVersion & 0xF;
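        /* Worked example of the decoding above (hypothetical leaf 1 value): for
           u32CpuVersion = 0x000206a7 this gives u32Family = 0x206 (the unmasked
           shift keeps the extended family/model bits, so anything other than a
           plain family 6 part trivially passes the "!= 6" test below),
           u32Model = 0xa and u32Stepping = 0x7. */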
        if (    (fFeatures & X86_CPUID_FEATURE_EDX_SEP)
            &&  (   u32Family   != 6 /* (> pentium pro) */
                 || u32Model    >= 3
                 || u32Stepping >= 3
                 || !ASMIsIntelCpu())
           )
        {
            /*
             * Read the MSR and see if it's in use or not.
             */
            uint32_t u32 = ASMRdMsr_Low(MSR_IA32_SYSENTER_CS);
            if (u32)
            {
                pVM->cpum.s.fHostUseFlags |= CPUM_USE_SYSENTER;
                Log(("CPUMR0Init: host uses sysenter cs=%08x%08x\n", ASMRdMsr_High(MSR_IA32_SYSENTER_CS), u32));
            }
        }

        /*
         * SYSCALL/SYSRET
         *
         * This feature is indicated by the SEP bit returned in EDX by CPUID
         * function 0x80000001.  Intel CPUs only support this feature in
         * long mode.  Since we're not running 64-bit guests in raw-mode there
         * are no issues with 32-bit intel hosts.
         */
        uint32_t cExt = 0;
        ASMCpuId(0x80000000, &cExt, &u32Dummy, &u32Dummy, &u32Dummy);
        if (    cExt >= 0x80000001
            &&  cExt <= 0x8000ffff)
        {
            uint32_t fExtFeaturesEDX = ASMCpuId_EDX(0x80000001);
            if (fExtFeaturesEDX & X86_CPUID_AMD_FEATURE_EDX_SEP)
            {
#ifdef RT_ARCH_X86
# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
                if (fExtFeaturesEDX & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
# else
                if (!ASMIsIntelCpu())
# endif
#endif
                {
                    uint64_t fEfer = ASMRdMsr(MSR_K6_EFER);
                    if (fEfer & MSR_K6_EFER_SCE)
                    {
                        pVM->cpum.s.fHostUseFlags |= CPUM_USE_SYSCALL;
                        Log(("CPUMR0Init: host uses syscall\n"));
                    }
                }
            }
        }
    }


    /*
     * Check if debug registers are armed.
     * This ASSUMES that DR7.GD is not set, or that it's handled transparently!
     */
    uint32_t u32DR7 = ASMGetDR7();
    if (u32DR7 & X86_DR7_ENABLED_MASK)
    {
        for (VMCPUID i = 0; i < pVM->cCpus; i++)
            pVM->aCpus[i].cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HOST;
        Log(("CPUMR0Init: host uses debug registers (dr7=%x)\n", u32DR7));
    }

    return VINF_SUCCESS;
}
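
/*
 * Note: the CPUM_USE_SYSENTER, CPUM_USE_SYSCALL and CPUM_USE_DEBUG_REGS_HOST flags set
 * above are presumably consumed by the world-switch code so it knows which host MSRs
 * and debug registers have to be preserved around guest execution; the consumers live
 * outside this file.
 */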


/**
 * Lazily sync in the FPU/XMM state
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   pCtx        CPU context
 */
VMMR0DECL(int) CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
    Assert(ASMGetCR4() & X86_CR4_OSFSXR);

    /* If the FPU state has already been loaded, then it's a guest trap. */
    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU)
    {
        Assert(    ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS))
               ||  ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * There are two basic actions:
     *   1. Save host fpu and restore guest fpu.
     *   2. Generate guest trap.
     *
     * When entering the hypervisor we'll always enable MP (for proper wait
     * trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag
     * is taken from the guest OS in order to get proper SSE handling.
     *
     *
     * Actions taken depending on the guest CR0 flags:
     *
     *   3    2    1
     *  TS | EM | MP | FPUInstr | WAIT :: VMM Action
     * ------------------------------------------------------------------------
     *   0 |  0 |  0 | Exec     | Exec :: Clear TS & MP, Save HC, Load GC.
     *   0 |  0 |  1 | Exec     | Exec :: Clear TS, Save HC, Load GC.
     *   0 |  1 |  0 | #NM      | Exec :: Clear TS & MP, Save HC, Load GC.
     *   0 |  1 |  1 | #NM      | Exec :: Clear TS, Save HC, Load GC.
     *   1 |  0 |  0 | #NM      | Exec :: Clear MP, Save HC, Load GC. (EM is already cleared.)
     *   1 |  0 |  1 | #NM      | #NM  :: Go to guest taking trap there.
     *   1 |  1 |  0 | #NM      | Exec :: Clear MP, Save HC, Load GC. (EM is already set.)
     *   1 |  1 |  1 | #NM      | #NM  :: Go to guest taking trap there.
     */
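
    /* The switch below implements the two table rows where both FPUInstr and WAIT
       trap (TS and MP set, EM either way): the #NM is simply reflected to the guest.
       Every other combination falls through to the save-host / load-guest path. */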

    switch (pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS))
    {
        case X86_CR0_MP | X86_CR0_TS:
        case X86_CR0_MP | X86_CR0_EM | X86_CR0_TS:
            return VINF_EM_RAW_GUEST_TRAP;
        default:
            break;
    }

#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    if (CPUMIsGuestInLongModeEx(pCtx))
    {
        Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE));

        /* Save the host state and record the fact (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM). */
        cpumR0SaveHostFPUState(&pVCpu->cpum.s);

        /* Restore the state on entry as we need to be in 64-bit mode to access the full state. */
        pVCpu->cpum.s.fUseFlags |= CPUM_SYNC_FPU_STATE;
    }
    else
#endif
    {
#ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE
# if defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined(VBOX_WITH_KERNEL_USING_XMM) /** @todo remove the #else here and move cpumHandleLazyFPUAsm back to VMMGC after branching out 3.0!!. */
        Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE));
        /** @todo Move the FFXR handling down into
         *        cpumR0SaveHostRestoreguestFPUState to optimize the
         *        VBOX_WITH_KERNEL_USING_XMM handling. */
        /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
        uint64_t SavedEFER = 0;
        if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
        {
            SavedEFER = ASMRdMsr(MSR_K6_EFER);
            if (SavedEFER & MSR_K6_EFER_FFXSR)
            {
                ASMWrMsr(MSR_K6_EFER, SavedEFER & ~MSR_K6_EFER_FFXSR);
                pVCpu->cpum.s.fUseFlags |= CPUM_MANUAL_XMM_RESTORE;
            }
        }

        /* Do the job and record that we've switched FPU state. */
        cpumR0SaveHostRestoreGuestFPUState(&pVCpu->cpum.s);

        /* Restore EFER. */
        if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
            ASMWrMsr(MSR_K6_EFER, SavedEFER);

# else
        uint64_t oldMsrEFERHost = 0;
        uint32_t oldCR0 = ASMGetCR0();

        /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
        if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
        {
            /** @todo Do we really need to read this every time?? The host could change this on the fly though.
             *  bird: what about starting by skipping the ASMWrMsr below if we didn't
             *        change anything? Ditto for the stuff in CPUMR0SaveGuestFPU. */
            oldMsrEFERHost = ASMRdMsr(MSR_K6_EFER);
            if (oldMsrEFERHost & MSR_K6_EFER_FFXSR)
            {
                ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
                pVCpu->cpum.s.fUseFlags |= CPUM_MANUAL_XMM_RESTORE;
            }
        }

        /* If we sync the FPU/XMM state on-demand, then we can continue execution as if nothing has happened. */
        int rc = CPUMHandleLazyFPU(pVCpu);
        AssertRC(rc);
        Assert(CPUMIsGuestFPUStateActive(pVCpu));

        /* Restore EFER MSR */
        if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
            ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost);

        /* CPUMHandleLazyFPU could have changed CR0; restore it. */
        ASMSetCR0(oldCR0);
# endif

#else  /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */

        /*
         * Save the FPU control word and MXCSR, so we can restore the state properly afterwards.
         * We don't want the guest to be able to trigger floating point/SSE exceptions on the host.
         */
        pVCpu->cpum.s.Host.fpu.FCW = CPUMGetFCW();
        if (pVM->cpum.s.CPUFeatures.edx.u1SSE)
            pVCpu->cpum.s.Host.fpu.MXCSR = CPUMGetMXCSR();

        cpumR0LoadFPU(pCtx);

        /*
         * The MSR_K6_EFER_FFXSR feature is AMD only so far, but check the cpuid just in case Intel adds it in the future.
         *
         * MSR_K6_EFER_FFXSR changes the behaviour of fxsave and fxrstor: the XMM state isn't saved/restored.
         */
        if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
        {
            /** @todo Do we really need to read this every time?? The host could change this on the fly though. */
            uint64_t msrEFERHost = ASMRdMsr(MSR_K6_EFER);

            if (msrEFERHost & MSR_K6_EFER_FFXSR)
            {
                /* fxrstor doesn't restore the XMM state! */
                cpumR0LoadXMM(pCtx);
                pVCpu->cpum.s.fUseFlags |= CPUM_MANUAL_XMM_RESTORE;
            }
        }

#endif /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
    }

    Assert((pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)) == (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM));
    return VINF_SUCCESS;
}
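
/*
 * Illustrative caller sketch (not part of this file): a ring-0 #NM intercept would
 * use CPUMR0LoadGuestFPU() roughly as below.  CPUMQueryGuestCtxPtr() is the usual way
 * to get at the guest context, but the exact call-site details are assumptions here;
 * hypotheticalReflectNMToGuest() is a made-up name for whatever injects the trap.
 *
 *      PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
 *      int rc = CPUMR0LoadGuestFPU(pVM, pVCpu, pCtx);
 *      if (rc == VINF_EM_RAW_GUEST_TRAP)
 *          rc = hypotheticalReflectNMToGuest(pVCpu);   // the guest gets the #NM
 *      else
 *          AssertRC(rc);                               // guest FPU/XMM state is now live
 */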


/**
 * Save guest FPU/XMM state
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   pCtx        CPU context
 */
VMMR0DECL(int) CPUMR0SaveGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
    Assert(ASMGetCR4() & X86_CR4_OSFSXR);
    AssertReturn((pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU), VINF_SUCCESS);

#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    if (CPUMIsGuestInLongModeEx(pCtx))
    {
        if (!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE))
        {
            HWACCMR0SaveFPUState(pVM, pVCpu, pCtx);
            cpumR0RestoreHostFPUState(&pVCpu->cpum.s);
        }
        /* else nothing to do; we didn't perform a world switch */
    }
    else
#endif
    {
#ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE
# ifdef VBOX_WITH_KERNEL_USING_XMM
        /*
         * We've already saved the XMM registers in the assembly wrapper, so
         * we have to save them before saving the entire FPU state and put them
         * back afterwards.
         */
        /** @todo This could be skipped if MSR_K6_EFER_FFXSR is set, but
         *        I'm not able to test such an optimization tonight.
         *        We could just do all this in assembly. */
        uint128_t aGuestXmmRegs[16];
        memcpy(&aGuestXmmRegs[0], &pVCpu->cpum.s.Guest.fpu.aXMM[0], sizeof(aGuestXmmRegs));
# endif

        /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
        uint64_t oldMsrEFERHost = 0;
        if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
        {
            oldMsrEFERHost = ASMRdMsr(MSR_K6_EFER);
            ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
        }
        cpumR0SaveGuestRestoreHostFPUState(&pVCpu->cpum.s);

        /* Restore EFER MSR */
        if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
            ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost | MSR_K6_EFER_FFXSR);

# ifdef VBOX_WITH_KERNEL_USING_XMM
        memcpy(&pVCpu->cpum.s.Guest.fpu.aXMM[0], &aGuestXmmRegs[0], sizeof(aGuestXmmRegs));
# endif

#else  /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
# ifdef VBOX_WITH_KERNEL_USING_XMM
#  error "Fix all the NM_TRAPS_IN_KERNEL_MODE code path. I'm not going to fix unused code now."
# endif
        cpumR0SaveFPU(pCtx);
        if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
        {
            /* fxsave doesn't save the XMM state! */
            cpumR0SaveXMM(pCtx);
        }

        /*
         * Restore the original FPU control word and MXCSR.
         * We don't want the guest to be able to trigger floating point/SSE exceptions on the host.
         */
        cpumR0SetFCW(pVCpu->cpum.s.Host.fpu.FCW);
        if (pVM->cpum.s.CPUFeatures.edx.u1SSE)
            cpumR0SetMXCSR(pVCpu->cpum.s.Host.fpu.MXCSR);
#endif /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
    }

    pVCpu->cpum.s.fUseFlags &= ~(CPUM_USED_FPU | CPUM_SYNC_FPU_STATE | CPUM_MANUAL_XMM_RESTORE);
    return VINF_SUCCESS;
}
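
/*
 * Note: CPUMR0SaveGuestFPU above is the counterpart of CPUMR0LoadGuestFPU.  It writes
 * the guest FPU/XMM state back into pVCpu->cpum.s.Guest, reinstates the host state the
 * load side stashed away, and clears CPUM_USED_FPU again.  The callers are assumed to
 * be the world-switch exit paths; they live outside this file.
 */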


/**
 * Save guest debug state
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   pCtx        CPU context
 * @param   fDR6        Include DR6 or not
 */
VMMR0DECL(int) CPUMR0SaveGuestDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, bool fDR6)
{
    Assert(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS);

    /* Save the guest's debug state. The caller is responsible for DR7. */
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    if (CPUMIsGuestInLongModeEx(pCtx))
    {
        if (!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_STATE))
        {
            uint64_t dr6 = pCtx->dr[6];

            HWACCMR0SaveDebugState(pVM, pVCpu, pCtx);
            if (!fDR6) /* dr6 was already up-to-date */
                pCtx->dr[6] = dr6;
        }
    }
    else
#endif
    {
#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cpumR0SaveDRx(&pCtx->dr[0]);
#else
        pCtx->dr[0] = ASMGetDR0();
        pCtx->dr[1] = ASMGetDR1();
        pCtx->dr[2] = ASMGetDR2();
        pCtx->dr[3] = ASMGetDR3();
#endif
        if (fDR6)
            pCtx->dr[6] = ASMGetDR6();
    }

    /*
     * Restore the host's debug state. DR0-3, DR6 and only then DR7!
     * DR7 contains 0x400 right now.
     */
    CPUMR0LoadHostDebugState(pVM, pVCpu);
    Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS));
    return VINF_SUCCESS;
}


/**
 * Lazily sync in the debug state
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   pCtx        CPU context
 * @param   fDR6        Include DR6 or not
 */
VMMR0DECL(int) CPUMR0LoadGuestDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, bool fDR6)
{
    /* Save the host state. */
    CPUMR0SaveHostDebugState(pVM, pVCpu);
    Assert(ASMGetDR7() == X86_DR7_INIT_VAL);

    /* Activate the guest state DR0-3; DR7 is left to the caller. */
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    if (CPUMIsGuestInLongModeEx(pCtx))
    {
        /* Restore the state on entry as we need to be in 64-bit mode to access the full state. */
        pVCpu->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_STATE;
    }
    else
#endif
    {
#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cpumR0LoadDRx(&pCtx->dr[0]);
#else
        ASMSetDR0(pCtx->dr[0]);
        ASMSetDR1(pCtx->dr[1]);
        ASMSetDR2(pCtx->dr[2]);
        ASMSetDR3(pCtx->dr[3]);
#endif
        if (fDR6)
            ASMSetDR6(pCtx->dr[6]);
    }

    pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
    return VINF_SUCCESS;
}
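
/*
 * The guest debug register functions above form a lazy pair: CPUMR0LoadGuestDebugState
 * stashes the host DR0-7 and activates the guest values (or merely sets
 * CPUM_SYNC_DEBUG_STATE for 64-bit guests on 32-bit hosts so they are loaded on entry),
 * while CPUMR0SaveGuestDebugState writes the guest values back into the context and
 * reinstates the host state.  The guest's DR7 is deliberately left to the caller in
 * both directions.
 */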

/**
 * Save the host debug state
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 */
VMMR0DECL(int) CPUMR0SaveHostDebugState(PVM pVM, PVMCPU pVCpu)
{
    /* Save the host state. */
#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
    cpumR0SaveDRx(&pVCpu->cpum.s.Host.dr0);
#else
    pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
    pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
    pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
    pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
#endif
    pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
    /** @todo dr7 might already have been changed to 0x400; don't care right now as it's harmless. */
    pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
    /* Make sure DR7 is harmless or else we could trigger breakpoints when restoring dr0-3 (!) */
    ASMSetDR7(X86_DR7_INIT_VAL);

    return VINF_SUCCESS;
}

/**
 * Load the host debug state
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 */
VMMR0DECL(int) CPUMR0LoadHostDebugState(PVM pVM, PVMCPU pVCpu)
{
    Assert(pVCpu->cpum.s.fUseFlags & (CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HYPER));

    /*
     * Restore the host's debug state. DR0-3, DR6 and only then DR7!
     * DR7 contains 0x400 right now.
     */
#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
    cpumR0LoadDRx(&pVCpu->cpum.s.Host.dr0);
#else
    ASMSetDR0(pVCpu->cpum.s.Host.dr0);
    ASMSetDR1(pVCpu->cpum.s.Host.dr1);
    ASMSetDR2(pVCpu->cpum.s.Host.dr2);
    ASMSetDR3(pVCpu->cpum.s.Host.dr3);
#endif
    ASMSetDR6(pVCpu->cpum.s.Host.dr6);
    ASMSetDR7(pVCpu->cpum.s.Host.dr7);

    pVCpu->cpum.s.fUseFlags &= ~(CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HYPER);
    return VINF_SUCCESS;
}


/**
 * Lazily sync in the hypervisor debug state
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   pCtx        CPU context
 * @param   fDR6        Include DR6 or not
 */
VMMR0DECL(int) CPUMR0LoadHyperDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, bool fDR6)
{
    /* Save the host state. */
    CPUMR0SaveHostDebugState(pVM, pVCpu);
    Assert(ASMGetDR7() == X86_DR7_INIT_VAL);

    /* Activate the guest state DR0-3; DR7 is left to the caller. */
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    if (CPUMIsGuestInLongModeEx(pCtx))
    {
        AssertFailed();
        return VERR_NOT_IMPLEMENTED;
    }
    else
#endif
    {
#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        AssertFailed();
        return VERR_NOT_IMPLEMENTED;
#else
        ASMSetDR0(CPUMGetHyperDR0(pVCpu));
        ASMSetDR1(CPUMGetHyperDR1(pVCpu));
        ASMSetDR2(CPUMGetHyperDR2(pVCpu));
        ASMSetDR3(CPUMGetHyperDR3(pVCpu));
#endif
        if (fDR6)
            ASMSetDR6(CPUMGetHyperDR6(pVCpu));
    }

    pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
    return VINF_SUCCESS;
}


#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
/**
 * Worker for cpumR0MapLocalApics. Check each CPU for a present Local APIC.
 * Play safe and treat each CPU separate.
 */
static void cpumR0MapLocalApicWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    uint32_t u32MaxIdx;
    uint32_t u32EBX, u32ECX, u32EDX;
    int iCpu = RTMpCpuIdToSetIndex(idCpu);
    Assert(iCpu < RTCPUSET_MAX_CPUS);
    ASMCpuId(0, &u32MaxIdx, &u32EBX, &u32ECX, &u32EDX);
    if (   (   (   u32EBX == X86_CPUID_VENDOR_INTEL_EBX
                && u32ECX == X86_CPUID_VENDOR_INTEL_ECX
                && u32EDX == X86_CPUID_VENDOR_INTEL_EDX)
            || (   u32EBX == X86_CPUID_VENDOR_AMD_EBX
                && u32ECX == X86_CPUID_VENDOR_AMD_ECX
                && u32EDX == X86_CPUID_VENDOR_AMD_EDX))
        && u32MaxIdx >= 1)
    {
        ASMCpuId(1, &u32MaxIdx, &u32EBX, &u32ECX, &u32EDX);
        if (    (u32EDX & X86_CPUID_FEATURE_EDX_APIC)
            &&  (u32EDX & X86_CPUID_FEATURE_EDX_MSR))
        {
            uint64_t u64ApicBase = ASMRdMsr(MSR_IA32_APICBASE);
            uint32_t u32MaxExtIdx;
            /* see Intel Manual: Local APIC Status and Location: MAXPHYADDR default is bit 36 */
            uint64_t u64Mask = UINT64_C(0x0000000ffffff000);
            ASMCpuId(0x80000000, &u32MaxExtIdx, &u32EBX, &u32ECX, &u32EDX);
            if (   u32MaxExtIdx >= 0x80000008
                && u32MaxExtIdx <  0x8000ffff)
            {
                uint32_t u32PhysBits;
                ASMCpuId(0x80000008, &u32PhysBits, &u32EBX, &u32ECX, &u32EDX);
                u32PhysBits &= 0xff;
                u64Mask = ((UINT64_C(1) << u32PhysBits) - 1) & UINT64_C(0xfffffffffffff000);
            }
            g_aLApics[iCpu].fEnabled = true;
            g_aLApics[iCpu].PhysBase = u64ApicBase & u64Mask;
        }
    }
}
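
/*
 * Worked example of the mask computation above (hypothetical host): with CPUID
 * 0x80000008 reporting 40 physical address bits,
 *      u64Mask = ((1 << 40) - 1) & 0xfffffffffffff000 = 0x000000fffffff000,
 * so bits 12..39 of IA32_APIC_BASE are kept as the 4KB-aligned physical address of
 * the Local APIC MMIO page; without that leaf the 36-bit MAXPHYADDR default applies.
 */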


/**
 * Map the MMIO page of each local APIC in the system.
 */
static int cpumR0MapLocalApics(void)
{
    int rc = RTMpOnAll(cpumR0MapLocalApicWorker, NULL, NULL);
    for (unsigned iCpu = 0; RT_SUCCESS(rc) && iCpu < RT_ELEMENTS(g_aLApics); iCpu++)
    {
        if (g_aLApics[iCpu].fEnabled)
        {
            rc = RTR0MemObjEnterPhys(&g_aLApics[iCpu].hMemObj, g_aLApics[iCpu].PhysBase,
                                     PAGE_SIZE, RTMEM_CACHE_POLICY_MMIO);
            if (RT_SUCCESS(rc))
                rc = RTR0MemObjMapKernel(&g_aLApics[iCpu].hMapObj, g_aLApics[iCpu].hMemObj, (void*)-1,
                                         PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
            if (RT_SUCCESS(rc))
            {
                void    *pApicBase   = RTR0MemObjAddress(g_aLApics[iCpu].hMapObj);
                uint32_t ApicVersion = ApicRegRead(pApicBase, APIC_REG_VERSION);
                /*
                 * 0x0X       82489 external APIC
                 * 0x1X       Local APIC
                 * 0x2X..0xFF reserved
                 */
                if ((APIC_REG_VERSION_GET_VER(ApicVersion) & 0xF0) != 0x10)
                {
                    RTR0MemObjFree(g_aLApics[iCpu].hMapObj, true /* fFreeMappings */);
                    RTR0MemObjFree(g_aLApics[iCpu].hMemObj, true /* fFreeMappings */);
                    g_aLApics[iCpu].fEnabled = false;
                    continue;
                }
                g_aLApics[iCpu].fHasThermal = APIC_REG_VERSION_GET_MAX_LVT(ApicVersion) >= 5;
                g_aLApics[iCpu].pv          = pApicBase;
            }
        }
    }
    if (RT_FAILURE(rc))
    {
        cpumR0UnmapLocalApics();
        return rc;
    }

    return VINF_SUCCESS;
}
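
/*
 * The mappings above are established once from CPUMR0ModuleInit() and torn down again
 * from CPUMR0ModuleTerm(); CPUMR0SetLApic() below then hands the current host CPU's
 * mapping address to the VM so the raw-mode world switcher can mask/unmask the NMI-mode
 * LVT entries (see the r33935 change description at the top of this page).
 */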


/**
 * Unmap the Local APIC of all host CPUs.
 */
static void cpumR0UnmapLocalApics(void)
{
    for (unsigned iCpu = RT_ELEMENTS(g_aLApics); iCpu-- > 0;)
    {
        if (g_aLApics[iCpu].pv)
        {
            RTR0MemObjFree(g_aLApics[iCpu].hMapObj, true /* fFreeMappings */);
            RTR0MemObjFree(g_aLApics[iCpu].hMemObj, true /* fFreeMappings */);
            g_aLApics[iCpu].fEnabled = false;
            g_aLApics[iCpu].pv       = NULL;
        }
    }
}


/**
 * Write the Local APIC mapping address of the current host CPU to CPUM to be
 * able to access the APIC registers in the raw mode switcher for disabling/
 * re-enabling the NMI. Must be called with disabled preemption or disabled
 * interrupts!
 *
 * @param   pVM         VM handle.
 * @param   idHostCpu   The ID of the current host CPU.
 */
VMMR0DECL(void) CPUMR0SetLApic(PVM pVM, RTCPUID idHostCpu)
{
    pVM->cpum.s.pvApicBase = g_aLApics[RTMpCpuIdToSetIndex(idHostCpu)].pv;
}

#endif /* VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI */