VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/CPUMR3Db.cpp@51574

Last change on this file since 51574 was 51344, checked in by vboxsync, 11 years ago

VMM/CPUM: Hyper heap fixes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 35.3 KB
/* $Id: CPUMR3Db.cpp 51344 2014-05-22 11:04:58Z vboxsync $ */
/** @file
 * CPUM - CPU database part.
 */

/*
 * Copyright (C) 2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_CPUM
#include <VBox/vmm/cpum.h>
#include "CPUMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/mm.h>

#include <VBox/err.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/mem.h>
#include <iprt/string.h>


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
typedef struct CPUMDBENTRY
{
    /** The CPU name. */
    const char     *pszName;
    /** The full CPU name. */
    const char     *pszFullName;
    /** The CPU vendor (CPUMCPUVENDOR). */
    uint8_t         enmVendor;
    /** The CPU family. */
    uint8_t         uFamily;
    /** The CPU model. */
    uint8_t         uModel;
    /** The CPU stepping. */
    uint8_t         uStepping;
    /** The microarchitecture. */
    CPUMMICROARCH   enmMicroarch;
    /** Scalable bus frequency used for reporting other frequencies. */
    uint64_t        uScalableBusFreq;
    /** Flags (TBD). */
    uint32_t        fFlags;
    /** The maximum physical address width of the CPU.  This should correspond
     * to the value in CPUID leaf 0x80000008 when present. */
    uint8_t         cMaxPhysAddrWidth;
    /** Pointer to an array of CPUID leaves. */
    PCCPUMCPUIDLEAF paCpuIdLeaves;
    /** The number of CPUID leaves in the array paCpuIdLeaves points to. */
    uint32_t        cCpuIdLeaves;
    /** The method used to deal with unknown CPUID leaves. */
    CPUMUKNOWNCPUID enmUnknownCpuId;
    /** The default unknown CPUID value. */
    CPUMCPUID       DefUnknownCpuId;

    /** MSR mask.  Several microarchitectures ignore the higher bits of the
     * MSR index. */
    uint32_t        fMsrMask;

    /** The number of ranges in the table pointed to by paMsrRanges. */
    uint32_t        cMsrRanges;
    /** MSR ranges for this CPU. */
    PCCPUMMSRRANGE  paMsrRanges;
} CPUMDBENTRY;
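
/* Each cpus/*.h header included below defines one such entry (g_Entry_<name>)
 * together with the CPUID leaf and MSR range tables it points to, guarded by a
 * VBOX_CPUDB_<name> define; the entries are collected in g_apCpumDbEntries
 * further down. */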


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/

/** @def NULL_ALONE
 * For eliminating an unnecessary data dependency in standalone builds (for
 * VBoxSVC). */
/** @def ZERO_ALONE
 * For eliminating an unnecessary data size dependency in standalone builds (for
 * VBoxSVC). */
#ifndef CPUM_DB_STANDALONE
# define NULL_ALONE(a_aTable)   a_aTable
# define ZERO_ALONE(a_cTable)   a_cTable
#else
# define NULL_ALONE(a_aTable)   NULL
# define ZERO_ALONE(a_cTable)   0
#endif


/** @name Short macros for the MSR range entries.
 *
 * These are rather cryptic, but this is to reduce the attack on the right
 * margin.
 *
 * @{ */
/** Alias one MSR onto another (a_uTarget). */
#define MAL(a_uMsr, a_szName, a_uTarget) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_MsrAlias, kCpumMsrWrFn_MsrAlias, 0, a_uTarget, 0, 0, a_szName)
/** Function handles everything. */
#define MFN(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, 0, a_szName)
/** Function handles everything, with GP mask. */
#define MFG(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, a_fWrGpMask, a_szName)
/** Function handlers, read-only. */
#define MFO(a_uMsr, a_szName, a_enmRdFnSuff) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_ReadOnly, 0, 0, 0, UINT64_MAX, a_szName)
/** Function handlers, ignore all writes. */
#define MFI(a_uMsr, a_szName, a_enmRdFnSuff) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_IgnoreWrite, 0, 0, UINT64_MAX, 0, a_szName)
/** Function handlers, with value. */
#define MFV(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uValue) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, a_uValue, 0, 0, a_szName)
/** Function handlers, with write ignore mask. */
#define MFW(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_fWrIgnMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, a_fWrIgnMask, 0, a_szName)
/** Function handlers, extended version. */
#define MFX(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, a_uValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
/** Function handlers, with CPUMCPU storage variable. */
#define MFS(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_CpumCpuMember) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, \
         RT_OFFSETOF(CPUMCPU, a_CpumCpuMember), 0, 0, 0, a_szName)
/** Function handlers, with CPUMCPU storage variable, ignore mask and GP mask. */
#define MFZ(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_CpumCpuMember, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, \
         RT_OFFSETOF(CPUMCPU, a_CpumCpuMember), 0, a_fWrIgnMask, a_fWrGpMask, a_szName)
/** Read-only fixed value. */
#define MVO(a_uMsr, a_szName, a_uValue) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_ReadOnly, 0, a_uValue, 0, UINT64_MAX, a_szName)
/** Read-only fixed value, ignores all writes. */
#define MVI(a_uMsr, a_szName, a_uValue) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, UINT64_MAX, 0, a_szName)
/** Read fixed value, ignore writes outside GP mask. */
#define MVG(a_uMsr, a_szName, a_uValue, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, 0, a_fWrGpMask, a_szName)
/** Read fixed value, extended version with both GP and ignore masks. */
#define MVX(a_uMsr, a_szName, a_uValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
/** The short form, no CPUM backing. */
#define MSN(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, \
         a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName)

/** Range: Function handles everything. */
#define RFN(a_uFirst, a_uLast, a_szName, a_enmRdFnSuff, a_enmWrFnSuff) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, 0, a_szName)
/** Range: Read fixed value, read-only. */
#define RVO(a_uFirst, a_uLast, a_szName, a_uValue) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_ReadOnly, 0, a_uValue, 0, UINT64_MAX, a_szName)
/** Range: Read fixed value, ignore writes. */
#define RVI(a_uFirst, a_uLast, a_szName, a_uValue) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, UINT64_MAX, 0, a_szName)
/** Range: The short form, no CPUM backing. */
#define RSN(a_uFirst, a_uLast, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, \
         a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName)

/** Internal form used by the macros. */
#ifdef VBOX_WITH_STATISTICS
# define RINT(a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName) \
    { a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, 0, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName, \
      { 0 }, { 0 }, { 0 }, { 0 } }
#else
# define RINT(a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName) \
    { a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, 0, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName }
#endif
/** @} */
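
/* An illustrative expansion of the shorthand (statistics disabled): the entry
 *     MVI(0x0000008b, "BIOS_SIGN", 0)
 * expands via RINT into the CPUMMSRRANGE initializer
 *     { 0x8b, 0x8b, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite,
 *       0, 0, 0, UINT64_MAX, 0, "BIOS_SIGN" }
 * i.e. a one-MSR range whose reads return the fixed value 0 and whose writes
 * are all silently ignored (the write-ignore mask is UINT64_MAX). */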


#include "cpus/Intel_Core_i7_3960X.h"
#include "cpus/Intel_Core_i5_3570.h"
#include "cpus/Intel_Core_i7_2635QM.h"
#include "cpus/Intel_Xeon_X5482_3_20GHz.h"
#include "cpus/Intel_Pentium_M_processor_2_00GHz.h"
#include "cpus/Intel_Pentium_4_3_00GHz.h"

#include "cpus/AMD_FX_8150_Eight_Core.h"
#include "cpus/AMD_Phenom_II_X6_1100T.h"
#include "cpus/Quad_Core_AMD_Opteron_2384.h"
#include "cpus/AMD_Athlon_64_X2_Dual_Core_4200.h"
#include "cpus/AMD_Athlon_64_3200.h"

#include "cpus/VIA_QuadCore_L4700_1_2_GHz.h"



/**
 * The database entries.
 *
 * 1. The first entry is special.  It is the fallback for unknown
 *    processors.  Thus, it had better be pretty representative.
 *
 * 2. The first entry for a CPU vendor is likewise important as it is
 *    the default entry for that vendor.
 *
 * Generally we put the most recent CPUs first, since these tend to have the
 * most complicated and backwards-compatible lists of MSRs.
 */
static CPUMDBENTRY const * const g_apCpumDbEntries[] =
{
#ifdef VBOX_CPUDB_Intel_Core_i5_3570
    &g_Entry_Intel_Core_i5_3570,
#endif
#ifdef VBOX_CPUDB_Intel_Core_i7_3960X
    &g_Entry_Intel_Core_i7_3960X,
#endif
#ifdef VBOX_CPUDB_Intel_Core_i7_2635QM
    &g_Entry_Intel_Core_i7_2635QM,
#endif
#ifdef VBOX_CPUDB_Intel_Pentium_M_processor_2_00GHz
    &g_Entry_Intel_Pentium_M_processor_2_00GHz,
#endif
#ifdef VBOX_CPUDB_Intel_Xeon_X5482_3_20GHz
    &g_Entry_Intel_Xeon_X5482_3_20GHz,
#endif
#ifdef VBOX_CPUDB_Intel_Pentium_4_3_00GHz
    &g_Entry_Intel_Pentium_4_3_00GHz,
#endif

#ifdef VBOX_CPUDB_AMD_FX_8150_Eight_Core
    &g_Entry_AMD_FX_8150_Eight_Core,
#endif
#ifdef VBOX_CPUDB_AMD_Phenom_II_X6_1100T
    &g_Entry_AMD_Phenom_II_X6_1100T,
#endif
#ifdef VBOX_CPUDB_Quad_Core_AMD_Opteron_2384
    &g_Entry_Quad_Core_AMD_Opteron_2384,
#endif
#ifdef VBOX_CPUDB_AMD_Athlon_64_X2_Dual_Core_4200
    &g_Entry_AMD_Athlon_64_X2_Dual_Core_4200,
#endif
#ifdef VBOX_CPUDB_AMD_Athlon_64_3200
    &g_Entry_AMD_Athlon_64_3200,
#endif

#ifdef VBOX_CPUDB_VIA_QuadCore_L4700_1_2_GHz
    &g_Entry_VIA_QuadCore_L4700_1_2_GHz,
#endif
};


#ifndef CPUM_DB_STANDALONE

/**
 * Binary search used by cpumR3MsrRangesInsert; it has some special properties
 * with respect to mismatches.
 *
 * @returns The index of the range containing @a uMsr, or the insert location
 *          if no range matches.
 * @param   paMsrRanges The MSR ranges to search.
 * @param   cMsrRanges  The number of MSR ranges.
 * @param   uMsr        What to search for.
 */
static uint32_t cpumR3MsrRangesBinSearch(PCCPUMMSRRANGE paMsrRanges, uint32_t cMsrRanges, uint32_t uMsr)
{
    if (!cMsrRanges)
        return 0;

    uint32_t iStart = 0;
    uint32_t iLast  = cMsrRanges - 1;
    for (;;)
    {
        uint32_t i = iStart + (iLast - iStart + 1) / 2;
        if (   uMsr >= paMsrRanges[i].uFirst
            && uMsr <= paMsrRanges[i].uLast)
            return i;
        if (uMsr < paMsrRanges[i].uFirst)
        {
            if (i <= iStart)
                return i;
            iLast = i - 1;
        }
        else
        {
            if (i >= iLast)
            {
                if (i < cMsrRanges)
                    i++;
                return i;
            }
            iStart = i + 1;
        }
    }
}
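
/* A concrete illustration of the mismatch behaviour, using a hypothetical
 * three-range table { [0x00..0x0f], [0x20..0x2f], [0x40..0x4f] }:
 *     uMsr = 0x25 -> 1  (hit: inside the second range)
 *     uMsr = 0x10 -> 1  (miss: the slot where a new range would be inserted)
 *     uMsr = 0x50 -> 3  (miss: insert slot at the end of the table)
 */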


/**
 * Ensures that there is space for at least @a cNewRanges in the table,
 * reallocating the table if necessary.
 *
 * @returns Pointer to the MSR ranges on success, NULL on failure.  On failure
 *          @a *ppaMsrRanges is freed and set to NULL.
 * @param   pVM             Pointer to the VM, used as the heap selector.
 *                          Passing NULL uses the host-context heap, otherwise
 *                          the VM's hyper heap is used.
 * @param   ppaMsrRanges    The variable pointing to the ranges (input/output).
 * @param   cMsrRanges      The current number of ranges.
 * @param   cNewRanges      The number of ranges to be added.
 */
static PCPUMMSRRANGE cpumR3MsrRangesEnsureSpace(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t cMsrRanges, uint32_t cNewRanges)
{
    uint32_t cMsrRangesAllocated;
    if (!pVM)
        cMsrRangesAllocated = RT_ALIGN_32(cMsrRanges, 16);
    else
    {
        /*
         * We're using the hyper heap now, but when the range array was copied over to it from
         * the host-context heap, we only copy the exact size and not the ensured size.
         * See @bugref{7270}.
         */
        cMsrRangesAllocated = cMsrRanges;
    }
    if (cMsrRangesAllocated < cMsrRanges + cNewRanges)
    {
        void    *pvNew;
        uint32_t cNew = RT_ALIGN_32(cMsrRanges + cNewRanges, 16);
        if (pVM)
        {
            Assert(ppaMsrRanges == &pVM->cpum.s.GuestInfo.paMsrRangesR3);
            Assert(cMsrRanges   == pVM->cpum.s.GuestInfo.cMsrRanges);

            size_t cb    = cMsrRangesAllocated * sizeof(**ppaMsrRanges);
            size_t cbNew = cNew * sizeof(**ppaMsrRanges);
            int rc = MMR3HyperRealloc(pVM, *ppaMsrRanges, cb, 32, MM_TAG_CPUM_MSRS, cbNew, &pvNew);
            if (RT_FAILURE(rc))
            {
                *ppaMsrRanges = NULL;
                pVM->cpum.s.GuestInfo.paMsrRangesR0 = NIL_RTR0PTR;
                pVM->cpum.s.GuestInfo.paMsrRangesRC = NIL_RTRCPTR;
                LogRel(("CPUM: cpumR3MsrRangesEnsureSpace: MMR3HyperRealloc failed. rc=%Rrc\n", rc));
                return NULL;
            }
            *ppaMsrRanges = (PCPUMMSRRANGE)pvNew;
        }
        else
        {
            pvNew = RTMemRealloc(*ppaMsrRanges, cNew * sizeof(**ppaMsrRanges));
            if (!pvNew)
            {
                RTMemFree(*ppaMsrRanges);
                *ppaMsrRanges = NULL;
                return NULL;
            }
        }
        *ppaMsrRanges = (PCPUMMSRRANGE)pvNew;
    }

    if (pVM)
    {
        /* Update R0 and RC pointers. */
        Assert(ppaMsrRanges == &pVM->cpum.s.GuestInfo.paMsrRangesR3);
        pVM->cpum.s.GuestInfo.paMsrRangesR0 = MMHyperR3ToR0(pVM, *ppaMsrRanges);
        pVM->cpum.s.GuestInfo.paMsrRangesRC = MMHyperR3ToRC(pVM, *ppaMsrRanges);
    }

    return *ppaMsrRanges;
}
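
/* Growth behaviour in numbers: on the host-context heap a table holding 17
 * ranges is treated as having RT_ALIGN_32(17, 16) = 32 allocated slots, so
 * appending one more range needs no reallocation; on the hyper heap the
 * allocation is assumed to be exact (see @bugref{7270} above), so any growth
 * reallocates, again rounding the new capacity up to a multiple of 16. */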


/**
 * Inserts a new MSR range into a sorted MSR range array.
 *
 * If the new MSR range overlaps existing ranges, the existing ones will be
 * adjusted/removed to fit in the new one.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS
 * @retval  VERR_NO_MEMORY
 *
 * @param   pVM             Pointer to the VM, used as the heap selector.
 *                          Passing NULL uses the host-context heap, otherwise
 *                          the hyper heap.
 * @param   ppaMsrRanges    The variable pointing to the ranges (input/output).
 *                          Must be NULL if using the hyper heap.
 * @param   pcMsrRanges     The variable holding the number of ranges.  Must be
 *                          NULL if using the hyper heap.
 * @param   pNewRange       The new range.
 */
int cpumR3MsrRangesInsert(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t *pcMsrRanges, PCCPUMMSRRANGE pNewRange)
{
    Assert(pNewRange->uLast >= pNewRange->uFirst);
    Assert(pNewRange->enmRdFn > kCpumMsrRdFn_Invalid && pNewRange->enmRdFn < kCpumMsrRdFn_End);
    Assert(pNewRange->enmWrFn > kCpumMsrWrFn_Invalid && pNewRange->enmWrFn < kCpumMsrWrFn_End);

    /*
     * Validate and use the VM's MSR ranges array if we are using the hyper heap.
     */
    if (pVM)
    {
        AssertReturn(!ppaMsrRanges, VERR_INVALID_PARAMETER);
        AssertReturn(!pcMsrRanges, VERR_INVALID_PARAMETER);

        ppaMsrRanges = &pVM->cpum.s.GuestInfo.paMsrRangesR3;
        pcMsrRanges  = &pVM->cpum.s.GuestInfo.cMsrRanges;
    }

    uint32_t        cMsrRanges  = *pcMsrRanges;
    PCPUMMSRRANGE   paMsrRanges = *ppaMsrRanges;

    /*
     * Optimize the linear insertion case where we add new entries at the end.
     */
    if (   cMsrRanges > 0
        && paMsrRanges[cMsrRanges - 1].uLast < pNewRange->uFirst)
    {
        paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 1);
        if (!paMsrRanges)
            return VERR_NO_MEMORY;
        paMsrRanges[cMsrRanges] = *pNewRange;
        *pcMsrRanges += 1;
    }
    else
    {
        uint32_t i = cpumR3MsrRangesBinSearch(paMsrRanges, cMsrRanges, pNewRange->uFirst);
        Assert(i == cMsrRanges || pNewRange->uFirst <= paMsrRanges[i].uLast);
        Assert(i == 0 || pNewRange->uFirst > paMsrRanges[i - 1].uLast);

        /*
         * Adding an entirely new entry?
         */
        if (   i >= cMsrRanges
            || pNewRange->uLast < paMsrRanges[i].uFirst)
        {
            paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 1);
            if (!paMsrRanges)
                return VERR_NO_MEMORY;
            if (i < cMsrRanges)
                memmove(&paMsrRanges[i + 1], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
            paMsrRanges[i] = *pNewRange;
            *pcMsrRanges += 1;
        }
        /*
         * Replace existing entry?
         */
        else if (   pNewRange->uFirst == paMsrRanges[i].uFirst
                 && pNewRange->uLast  == paMsrRanges[i].uLast)
            paMsrRanges[i] = *pNewRange;
        /*
         * Splitting an existing entry?
         */
        else if (   pNewRange->uFirst > paMsrRanges[i].uFirst
                 && pNewRange->uLast  < paMsrRanges[i].uLast)
        {
            paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 2);
            if (!paMsrRanges)
                return VERR_NO_MEMORY;
            if (i < cMsrRanges)
                memmove(&paMsrRanges[i + 2], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
            paMsrRanges[i + 1] = *pNewRange;
            paMsrRanges[i + 2] = paMsrRanges[i];
            paMsrRanges[i    ].uLast  = pNewRange->uFirst - 1;
            paMsrRanges[i + 2].uFirst = pNewRange->uLast  + 1;
            *pcMsrRanges += 2;
        }
        /*
         * Complicated scenarios that can affect more than one range.
         *
         * The current code does not optimize memmove calls when replacing
         * one or more existing ranges, because it's tedious to deal with and
         * not expected to be a frequent usage scenario.
         */
        else
        {
            /* Adjust start of first match? */
            if (   pNewRange->uFirst <= paMsrRanges[i].uFirst
                && pNewRange->uLast  <  paMsrRanges[i].uLast)
                paMsrRanges[i].uFirst = pNewRange->uLast + 1;
            else
            {
                /* Adjust end of first match? */
                if (pNewRange->uFirst > paMsrRanges[i].uFirst)
                {
                    Assert(paMsrRanges[i].uLast >= pNewRange->uFirst);
                    paMsrRanges[i].uLast = pNewRange->uFirst - 1;
                    i++;
                }
                /* Replace the whole first match (lazy bird). */
                else
                {
                    if (i + 1 < cMsrRanges)
                        memmove(&paMsrRanges[i], &paMsrRanges[i + 1], (cMsrRanges - i - 1) * sizeof(paMsrRanges[0]));
                    cMsrRanges = *pcMsrRanges -= 1;
                }

                /* Does the new range affect more ranges? */
                while (   i < cMsrRanges
                       && pNewRange->uLast >= paMsrRanges[i].uFirst)
                {
                    if (pNewRange->uLast < paMsrRanges[i].uLast)
                    {
                        /* Adjust the start of it, then we're done. */
                        paMsrRanges[i].uFirst = pNewRange->uLast + 1;
                        break;
                    }

                    /* Remove it entirely. */
                    if (i + 1 < cMsrRanges)
                        memmove(&paMsrRanges[i], &paMsrRanges[i + 1], (cMsrRanges - i - 1) * sizeof(paMsrRanges[0]));
                    cMsrRanges = *pcMsrRanges -= 1;
                }
            }

            /* Now, perform a normal insertion. */
            paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 1);
            if (!paMsrRanges)
                return VERR_NO_MEMORY;
            if (i < cMsrRanges)
                memmove(&paMsrRanges[i + 1], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
            paMsrRanges[i] = *pNewRange;
            *pcMsrRanges += 1;
        }
    }

    return VINF_SUCCESS;
}
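
/* Worked overlap example: inserting the range [0x10..0x1f] into a table that
 * already holds a single range [0x00..0x2f] takes the splitting path above and
 * yields three ranges, [0x00..0x0f], [0x10..0x1f] (the new one, with its own
 * read/write handlers) and [0x20..0x2f], leaving the non-overlapped parts of
 * the original range intact. */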


/**
 * Worker for cpumR3MsrApplyFudge that applies one table.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the cross context VM structure.
 * @param   paRanges    Array of MSRs to fudge.
 * @param   cRanges     Number of MSRs in the array.
 */
static int cpumR3MsrApplyFudgeTable(PVM pVM, PCCPUMMSRRANGE paRanges, size_t cRanges)
{
    for (uint32_t i = 0; i < cRanges; i++)
        if (!cpumLookupMsrRange(pVM, paRanges[i].uFirst))
        {
            LogRel(("CPUM: MSR fudge: %#010x %s\n", paRanges[i].uFirst, paRanges[i].szName));
            int rc = cpumR3MsrRangesInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges,
                                           &paRanges[i]);
            if (RT_FAILURE(rc))
                return rc;
        }
    return VINF_SUCCESS;
}


/**
 * Fudges the MSRs that guests are known to access in some odd cases.
 *
 * A typical example is a VM that has been moved between different hosts where
 * for instance the CPU vendor differs.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the cross context VM structure.
 */
int cpumR3MsrApplyFudge(PVM pVM)
{
    /*
     * Basic.
     */
    static CPUMMSRRANGE const s_aFudgeMsrs[] =
    {
        MFO(0x00000000, "IA32_P5_MC_ADDR",          Ia32P5McAddr),
        MFX(0x00000001, "IA32_P5_MC_TYPE",          Ia32P5McType,   Ia32P5McType,   0, 0, UINT64_MAX),
        MVO(0x00000017, "IA32_PLATFORM_ID",         0),
        MFN(0x0000001b, "IA32_APIC_BASE",           Ia32ApicBase,   Ia32ApicBase),
        MVI(0x0000008b, "BIOS_SIGN",                0),
        MFX(0x000000fe, "IA32_MTRRCAP",             Ia32MtrrCap,    ReadOnly,       0x508, 0, 0),
        MFX(0x00000179, "IA32_MCG_CAP",             Ia32McgCap,     ReadOnly,       0x005, 0, 0),
        MFX(0x0000017a, "IA32_MCG_STATUS",          Ia32McgStatus,  Ia32McgStatus,  0, ~(uint64_t)UINT32_MAX, 0),
        MFN(0x000001a0, "IA32_MISC_ENABLE",         Ia32MiscEnable, Ia32MiscEnable),
        MFN(0x000001d9, "IA32_DEBUGCTL",            Ia32DebugCtl,   Ia32DebugCtl),
        MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP",   P6LastBranchFromIp),
        MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP",     P6LastBranchToIp),
        MFO(0x000001dd, "P6_LAST_INT_FROM_IP",      P6LastIntFromIp),
        MFO(0x000001de, "P6_LAST_INT_TO_IP",        P6LastIntToIp),
        MFS(0x00000277, "IA32_PAT",                 Ia32Pat, Ia32Pat, Guest.msrPAT),
        MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE",       Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, ~(uint64_t)0xc07),
        MFN(0x00000400, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
    };
    int rc = cpumR3MsrApplyFudgeTable(pVM, &s_aFudgeMsrs[0], RT_ELEMENTS(s_aFudgeMsrs));
    AssertLogRelRCReturn(rc, rc);

    /*
     * Windows XP might mistake Opterons and other newer CPUs for Pentium 4s.
     */
    if (pVM->cpum.s.GuestFeatures.uFamily >= 0xf)
    {
        static CPUMMSRRANGE const s_aP4FudgeMsrs[] =
        {
            MFX(0x0000002c, "P4_EBC_FREQUENCY_ID", IntelP4EbcFrequencyId, IntelP4EbcFrequencyId, 0xf12010f, UINT64_MAX, 0),
        };
        rc = cpumR3MsrApplyFudgeTable(pVM, &s_aP4FudgeMsrs[0], RT_ELEMENTS(s_aP4FudgeMsrs));
        AssertLogRelRCReturn(rc, rc);
    }

    return rc;
}


int cpumR3DbGetCpuInfo(const char *pszName, PCPUMINFO pInfo)
{
    CPUMDBENTRY const *pEntry = NULL;
    int                rc;

    if (!strcmp(pszName, "host"))
    {
        /*
         * Create a CPU database entry for the host CPU.  This means getting
         * the CPUID bits from the real CPU and grabbing the closest matching
         * database entry for MSRs.
         */
        rc = CPUMR3CpuIdDetectUnknownLeafMethod(&pInfo->enmUnknownCpuIdMethod, &pInfo->DefCpuId);
        if (RT_FAILURE(rc))
            return rc;
        rc = CPUMR3CpuIdCollectLeaves(&pInfo->paCpuIdLeavesR3, &pInfo->cCpuIdLeaves);
        if (RT_FAILURE(rc))
            return rc;

        /* Lookup database entry for MSRs. */
        CPUMCPUVENDOR const enmVendor    = CPUMR3CpuIdDetectVendorEx(pInfo->paCpuIdLeavesR3[0].uEax,
                                                                     pInfo->paCpuIdLeavesR3[0].uEbx,
                                                                     pInfo->paCpuIdLeavesR3[0].uEcx,
                                                                     pInfo->paCpuIdLeavesR3[0].uEdx);
        uint32_t      const uStd1Eax     = pInfo->paCpuIdLeavesR3[1].uEax;
        uint8_t       const uFamily      = ASMGetCpuFamily(uStd1Eax);
        uint8_t       const uModel       = ASMGetCpuModel(uStd1Eax, enmVendor == CPUMCPUVENDOR_INTEL);
        uint8_t       const uStepping    = ASMGetCpuStepping(uStd1Eax);
        CPUMMICROARCH const enmMicroarch = CPUMR3CpuIdDetermineMicroarchEx(enmVendor, uFamily, uModel, uStepping);

        for (unsigned i = 0; i < RT_ELEMENTS(g_apCpumDbEntries); i++)
        {
            CPUMDBENTRY const *pCur = g_apCpumDbEntries[i];
            if ((CPUMCPUVENDOR)pCur->enmVendor == enmVendor)
            {
                /* Match against family, microarch, model and stepping.  Except
                   for family, always take the closest match, preferring values
                   at or above the host's over ones below it. */
                if (pCur->uFamily == uFamily)
                {
                    if (pCur->enmMicroarch == enmMicroarch)
                    {
                        if (pCur->uModel == uModel)
                        {
                            if (pCur->uStepping == uStepping)
                            {
                                /* Perfect match. */
                                pEntry = pCur;
                                break;
                            }

                            if (   !pEntry
                                || pEntry->uModel       != uModel
                                || pEntry->enmMicroarch != enmMicroarch
                                || pEntry->uFamily      != uFamily)
                                pEntry = pCur;
                            else if (  pCur->uStepping >= uStepping
                                     ? pCur->uStepping < pEntry->uStepping || pEntry->uStepping < uStepping
                                     : pCur->uStepping > pEntry->uStepping)
                                pEntry = pCur;
                        }
                        else if (   !pEntry
                                 || pEntry->enmMicroarch != enmMicroarch
                                 || pEntry->uFamily      != uFamily)
                            pEntry = pCur;
                        else if (  pCur->uModel >= uModel
                                 ? pCur->uModel < pEntry->uModel || pEntry->uModel < uModel
                                 : pCur->uModel > pEntry->uModel)
                            pEntry = pCur;
                    }
                    else if (   !pEntry
                             || pEntry->uFamily != uFamily)
                        pEntry = pCur;
                    else if (  pCur->enmMicroarch >= enmMicroarch
                             ? pCur->enmMicroarch < pEntry->enmMicroarch || pEntry->enmMicroarch < enmMicroarch
                             : pCur->enmMicroarch > pEntry->enmMicroarch)
                        pEntry = pCur;
                }
                /* We don't do closeness matching on family, we use the first
                   entry for the CPU vendor instead. (P4 workaround.) */
                else if (!pEntry)
                    pEntry = pCur;
            }
        }
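        /* The precedence above, spelled out: an exact family/microarch/model/
           stepping match wins outright; otherwise an entry matching family,
           microarch and model beats one that only matches family and microarch,
           which in turn beats one that only matches the family; ties are broken
           by the closeness rule noted at the top of the loop. */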

        if (pEntry)
            LogRel(("CPUM: Matched host CPU %s %#x/%#x/%#x %s with CPU DB entry '%s' (%s %#x/%#x/%#x %s).\n",
                    CPUMR3CpuVendorName(enmVendor), uFamily, uModel, uStepping, CPUMR3MicroarchName(enmMicroarch),
                    pEntry->pszName, CPUMR3CpuVendorName((CPUMCPUVENDOR)pEntry->enmVendor), pEntry->uFamily, pEntry->uModel,
                    pEntry->uStepping, CPUMR3MicroarchName(pEntry->enmMicroarch) ));
        else
        {
            pEntry = g_apCpumDbEntries[0];
            LogRel(("CPUM: No matching processor database entry %s %#x/%#x/%#x %s, falling back on '%s'.\n",
                    CPUMR3CpuVendorName(enmVendor), uFamily, uModel, uStepping, CPUMR3MicroarchName(enmMicroarch),
                    pEntry->pszName));
        }
    }
    else
    {
        /*
         * We're supposed to be emulating a specific CPU that is included in
         * our CPU database.  The CPUID tables need to be copied onto the
         * heap so the caller can modify them and so they can be freed like
         * in the host case above.
         */
        for (unsigned i = 0; i < RT_ELEMENTS(g_apCpumDbEntries); i++)
            if (!strcmp(pszName, g_apCpumDbEntries[i]->pszName))
            {
                pEntry = g_apCpumDbEntries[i];
                break;
            }
        if (!pEntry)
        {
            LogRel(("CPUM: Cannot locate any CPU by the name '%s'\n", pszName));
            return VERR_CPUM_DB_CPU_NOT_FOUND;
        }

        pInfo->cCpuIdLeaves = pEntry->cCpuIdLeaves;
        if (pEntry->cCpuIdLeaves)
        {
            pInfo->paCpuIdLeavesR3 = (PCPUMCPUIDLEAF)RTMemDup(pEntry->paCpuIdLeaves,
                                                              sizeof(pEntry->paCpuIdLeaves[0]) * pEntry->cCpuIdLeaves);
            if (!pInfo->paCpuIdLeavesR3)
                return VERR_NO_MEMORY;
        }
        else
            pInfo->paCpuIdLeavesR3 = NULL;

        pInfo->enmUnknownCpuIdMethod = pEntry->enmUnknownCpuId;
        pInfo->DefCpuId              = pEntry->DefUnknownCpuId;

        LogRel(("CPUM: Using CPU DB entry '%s' (%s %#x/%#x/%#x %s).\n",
                pEntry->pszName, CPUMR3CpuVendorName((CPUMCPUVENDOR)pEntry->enmVendor),
                pEntry->uFamily, pEntry->uModel, pEntry->uStepping, CPUMR3MicroarchName(pEntry->enmMicroarch) ));
    }

    pInfo->fMsrMask             = pEntry->fMsrMask;
    pInfo->iFirstExtCpuIdLeaf   = 0; /* Set by caller. */
    pInfo->uPadding             = 0;
    pInfo->uScalableBusFreq     = pEntry->uScalableBusFreq;
    pInfo->paCpuIdLeavesR0      = NIL_RTR0PTR;
    pInfo->paMsrRangesR0        = NIL_RTR0PTR;
    pInfo->paCpuIdLeavesRC      = NIL_RTRCPTR;
    pInfo->paMsrRangesRC        = NIL_RTRCPTR;

    /*
     * Copy the MSR range.
     */
    uint32_t        cMsrs   = 0;
    PCPUMMSRRANGE   paMsrs  = NULL;

    PCCPUMMSRRANGE  pCurMsr = pEntry->paMsrRanges;
    uint32_t        cLeft   = pEntry->cMsrRanges;
    while (cLeft-- > 0)
    {
        rc = cpumR3MsrRangesInsert(NULL /* pVM */, &paMsrs, &cMsrs, pCurMsr);
        if (RT_FAILURE(rc))
        {
            Assert(!paMsrs); /* The above function frees this. */
            RTMemFree(pInfo->paCpuIdLeavesR3);
            pInfo->paCpuIdLeavesR3 = NULL;
            return rc;
        }
        pCurMsr++;
    }

    pInfo->paMsrRangesR3 = paMsrs;
    pInfo->cMsrRanges    = cMsrs;
    return VINF_SUCCESS;
}
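
/* Illustrative call (hypothetical surrounding context): during VM init the
 * caller typically asks for the host profile, e.g.
 *
 *     CPUMINFO Info;
 *     int rc = cpumR3DbGetCpuInfo("host", &Info);
 *
 * which collects the real CPUID leaves and pairs them with the closest MSR
 * database entry; any other name must exactly match the pszName of one of the
 * g_apCpumDbEntries above (see the strcmp in the else branch). */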


/**
 * Inserts an MSR range into the VM.
 *
 * If the new MSR range overlaps existing ranges, the existing ones will be
 * adjusted/removed to fit in the new one.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the cross context VM structure.
 * @param   pNewRange   Pointer to the MSR range being inserted.
 */
VMMR3DECL(int) CPUMR3MsrRangesInsert(PVM pVM, PCCPUMMSRRANGE pNewRange)
{
    AssertReturn(pVM, VERR_INVALID_PARAMETER);
    AssertReturn(pNewRange, VERR_INVALID_PARAMETER);

    return cpumR3MsrRangesInsert(pVM, NULL /* ppaMsrRanges */, NULL /* pcMsrRanges */, pNewRange);
}
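
/* A minimal usage sketch (hypothetical MSR and value; MVI expands to a
 * CPUMMSRRANGE initializer as shown earlier, so this compiles as-is):
 *
 *     CPUMMSRRANGE NewRange = MVI(0x0000008b, "BIOS_SIGN", 0);
 *     int rc = CPUMR3MsrRangesInsert(pVM, &NewRange);
 *     AssertLogRelRC(rc);
 *
 * Ranges already registered for the VM that overlap the new one are trimmed
 * or removed as described above. */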


/**
 * Registers statistics for the MSRs.
 *
 * This must not be called before the MSRs have been finalized and moved to the
 * hyper heap.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the cross context VM structure.
 */
int cpumR3MsrRegStats(PVM pVM)
{
    /*
     * Global statistics.
     */
    PCPUM pCpum = &pVM->cpum.s;
    STAM_REL_REG(pVM, &pCpum->cMsrReads, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/Reads",
                 STAMUNIT_OCCURENCES, "All RDMSRs making it to CPUM.");
    STAM_REL_REG(pVM, &pCpum->cMsrReadsRaiseGp, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/ReadsRaisingGP",
                 STAMUNIT_OCCURENCES, "RDMSR raising #GPs, except unknown MSRs.");
    STAM_REL_REG(pVM, &pCpum->cMsrReadsUnknown, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/ReadsUnknown",
                 STAMUNIT_OCCURENCES, "RDMSR on unknown MSRs (raises #GP).");
    STAM_REL_REG(pVM, &pCpum->cMsrWrites, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/Writes",
                 STAMUNIT_OCCURENCES, "All WRMSRs making it to CPUM.");
    STAM_REL_REG(pVM, &pCpum->cMsrWritesRaiseGp, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/WritesRaisingGP",
                 STAMUNIT_OCCURENCES, "WRMSR raising #GPs, except unknown MSRs.");
    STAM_REL_REG(pVM, &pCpum->cMsrWritesToIgnoredBits, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/WritesToIgnoredBits",
                 STAMUNIT_OCCURENCES, "Writing of ignored bits.");
    STAM_REL_REG(pVM, &pCpum->cMsrWritesUnknown, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/WritesUnknown",
                 STAMUNIT_OCCURENCES, "WRMSR on unknown MSRs (raises #GP).");


# ifdef VBOX_WITH_STATISTICS
    /*
     * Per range.
     */
    PCPUMMSRRANGE paRanges = pVM->cpum.s.GuestInfo.paMsrRangesR3;
    uint32_t      cRanges  = pVM->cpum.s.GuestInfo.cMsrRanges;
    for (uint32_t i = 0; i < cRanges; i++)
    {
        char    szName[160];
        ssize_t cchName;

        if (paRanges[i].uFirst == paRanges[i].uLast)
            cchName = RTStrPrintf(szName, sizeof(szName), "/CPUM/MSRs/%#010x-%s",
                                  paRanges[i].uFirst, paRanges[i].szName);
        else
            cchName = RTStrPrintf(szName, sizeof(szName), "/CPUM/MSRs/%#010x-%#010x-%s",
                                  paRanges[i].uFirst, paRanges[i].uLast, paRanges[i].szName);

        RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-reads");
        STAMR3Register(pVM, &paRanges[i].cReads, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_OCCURENCES, "RDMSR");

        RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-writes");
        STAMR3Register(pVM, &paRanges[i].cWrites, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "WRMSR");

        RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-GPs");
        STAMR3Register(pVM, &paRanges[i].cGps, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "#GPs");

        RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-ign-bits-writes");
        STAMR3Register(pVM, &paRanges[i].cIgnoredBits, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "WRMSR w/ ignored bits");
    }
# endif /* VBOX_WITH_STATISTICS */

    return VINF_SUCCESS;
}
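
/* For example, the single-MSR range 0x277 named IA32_PAT from the fudge table
 * above would register the counters /CPUM/MSRs/0x00000277-IA32_PAT-reads,
 * ...-writes, ...-GPs and ...-ign-bits-writes. */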

#endif /* !CPUM_DB_STANDALONE */
